I am trying to access S3 files using the Hadoop shell commands, and when I run the command below I get the error shown.
What I have done
I set up a single-node Hadoop installation (hadoop-2.6.1) and added the hadoop-aws JAR and the aws-java-sdk JAR to the classpath.
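For context, the JARs were added to the classpath roughly like this (paths and versions are illustrative, based on the stock share/hadoop/tools/lib directory, not the exact lines from my box):

# appended to etc/hadoop/hadoop-env.sh (illustrative)
export HADOOP_HOME=/home/ubuntu/hadoop-2.6.1
export HADOOP_CLASSPATH="$HADOOP_CLASSPATH:$HADOOP_HOME/share/hadoop/tools/lib/hadoop-aws-2.6.1.jar"
export HADOOP_CLASSPATH="$HADOOP_CLASSPATH:$HADOOP_HOME/share/hadoop/tools/lib/aws-java-sdk-1.7.4.jar"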
The command I ran
hdfs dfs -ls s3a://s3-us-west-2.amazonaws.com/azpoc1/
The error
ubuntu@ip-172-31-2-211:~/hadoop-2.6.1$ hdfs dfs -ls s3a://s3-us-west-2.amazonaws.com/azpoc1/
-ls: Fatal internal error
com.amazonaws.services.s3.model.AmazonS3Exception: Forbidden (Service: Amazon S3; Status Code: 403; Error Code: 403 Forbidden; Request ID: FC80B14D00C2FBE0; S3 Extended Request ID: TAHwxzqjMF8CD3bTnyaRGwpAgQnu0DsUFWL/E1llrXDfS+CqEMq6K735Koh7QkpSwEe8jzIOIX0=), S3 Extended Request ID: TAHwxzqjMF8CD3bTnyaRGwpAgQnu0DsUFWL/E1llrXDfS+CqEMq6K735Koh7QkpSwEe8jzIOIX0=
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1632)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1304)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1058)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:743)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:717)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4365)
at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4312)
at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:1270)
at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:1245)
at org.apache.hadoop.fs.s3a.S3AFileSystem.getFileStatus(S3AFileSystem.java:688)
at org.apache.hadoop.fs.s3a.S3AFileSystem.getFileStatus(S3AFileSystem.java:71)
at org.apache.hadoop.fs.Globber.getFileStatus(Globber.java:57)
at org.apache.hadoop.fs.Globber.glob(Globber.java:252)
at org.apache.hadoop.fs.FileSystem.globStatus(FileSystem.java:1625)
at org.apache.hadoop.fs.shell.PathData.expandAsGlob(PathData.java:326)
at org.apache.hadoop.fs.shell.Command.expandArgument(Command.java:224)
at org.apache.hadoop.fs.shell.Command.expandArguments(Command.java:207)
at org.apache.hadoop.fs.shell.Command.processRawArguments(Command.java:190)
at org.apache.hadoop.fs.shell.Command.run(Command.java:154)
at org.apache.hadoop.fs.FsShell.run(FsShell.java:287)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
at org.apache.hadoop.fs.FsShell.main(FsShell.java:340)
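For what it's worth, if I read the S3A documentation correctly, the authority part of an s3a:// URI is supposed to be just the bucket name (the regional endpoint is configured separately), so a plain listing of a bucket named azpoc1 would look like this (bucket name assumed from the path in my command above):

hdfs dfs -ls s3a://azpoc1/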
My core-site.xml file
<configuration>
  <property>
    <name>fs.default.name</name>
    <value>hdfs://localhost:50000</value>
  </property>
  <property>
    <name>fs.s3a.access.key</name>
    <value>*****</value>
  </property>
  <property>
    <name>fs.s3a.secret.key</name>
    <value>*****</value>
  </property>
  <property>
    <name>fs.s3a.impl</name>
    <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
  </property>
</configuration>
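In case it is relevant: as far as I understand, the regional endpoint normally goes into the fs.s3a.endpoint property rather than into the URI itself. A sketch of what I believe that property would look like for us-west-2 (the value is taken from the AWS endpoint list, not from a working config of mine):

<property>
  <name>fs.s3a.endpoint</name>
  <value>s3-us-west-2.amazonaws.com</value>
</property>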