I am trying to set up Hadoop (standalone mode) on Windows Server 2016. I formatted the namenode and then ran the start-all.cmd command to start all the services, and I can see the Resource Manager on its default port 8088, but my namenode and datanode refuse to start. I am using Hadoop 2.9.2 and java version "1.8.0_241", and I downloaded the Hadoop binaries (with winutils.exe and hadoop.dll) from this link: https://github.com/cdarlint/winutils
I added the hadoop/bin and hadoop/sbin paths, as well as hadoop/bin/hadoop.dll, to the system environment variables.
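Roughly how the variables are set (a sketch; the install path is assumed from the dfs.*.dir values in my hdfs-site.xml below):

:: assumed install location, matching the dfs.*.dir values below
setx HADOOP_HOME "C:\Users\sudhakar4067\installs\hadoop-2.9.2"
:: bin holds winutils.exe and hadoop.dll, sbin holds start-all.cmd
setx PATH "%PATH%;%HADOOP_HOME%\bin;%HADOOP_HOME%\sbin"
:: verify both native binaries are actually resolvable from PATH
where winutils.exe
where hadoop.dll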
My configurations:
core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>
hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///C:/Users/sudhakar4067/installs/hadoop-2.9.2/data/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///C:/Users/sudhakar4067/installs/hadoop-2.9.2/data/datanode</value>
</property>
</configuration>
mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.auxservices.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>
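For completeness, the sequence I ran after editing these files:

:: initialize the namenode metadata directory configured above
hdfs namenode -format
:: start the HDFS and YARN daemons in separate windows
%HADOOP_HOME%\sbin\start-all.cmd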
Namenode error
20/04/21 13:05:16 ERROR nodemanager.NodeManager: Error starting NodeManager
java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Ljava/lang/String;I)Z
at org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Native Method)
at org.apache.hadoop.io.nativeio.NativeIO$Windows.access(NativeIO.java:606)
at org.apache.hadoop.fs.FileUtil.canRead(FileUtil.java:998)
at org.apache.hadoop.util.DiskChecker.checkAccessByFileMethods(DiskChecker.java:160)
at org.apache.hadoop.util.DiskChecker.checkDirInternal(DiskChecker.java:100)
at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:77)
at org.apache.hadoop.util.BasicDiskValidator.checkStatus(BasicDiskValidator.java:32)
at org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.testDirs(DirectoryCollection.java:473)
at org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.checkDirs(DirectoryCollection.java:392)
at org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService.checkDirs(LocalDirsHandlerService.java:490)
at org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService.serviceInit(LocalDirsHandlerService.java:222)
at org.apache.hadoop.service.AbstractService.init(AbstractService.java:164)
at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:108)
at org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService.serviceInit(NodeHealthCheckerService.java:59)
at org.apache.hadoop.service.AbstractService.init(AbstractService.java:164)
at org.apache.hadoop.service.CompositeService.serviceInit(CompositeService.java:108)
at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceInit(NodeManager.java:451)
at org.apache.hadoop.service.AbstractService.init(AbstractService.java:164)
at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:844)
at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:912)
20/04/21 13:05:16 INFO impl.MetricsSystemImpl: Stopping NodeManager metrics system...
20/04/21 13:05:16 INFO impl.MetricsSystemImpl: NodeManager metrics system stopped.
20/04/21 13:05:16 INFO impl.MetricsSystemImpl: NodeManager metrics system shutdown complete.
20/04/21 13:05:16 INFO nodemanager.NodeManager: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NodeManager at trnd-5/10.128.0.9
************************************************************/
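If I understand the UnsatisfiedLinkError correctly, the JVM never loads hadoop.dll, so the native NativeIO$Windows.access0 call has no implementation behind it. A quick check of whether the native library is found at all (checknative is a standard Hadoop subcommand):

:: prints "hadoop: true <path>" when hadoop.dll is found, false otherwise
hadoop checknative -a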
Datanode error
java.lang.RuntimeException: Error while running command to get file permissions : ExitCodeException exitCode=-1073741701:
at org.apache.hadoop.util.Shell.runCommand(Shell.java:998)
at org.apache.hadoop.util.Shell.run(Shell.java:884)
at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1216)
at org.apache.hadoop.util.Shell.execCommand(Shell.java:1310)
at org.apache.hadoop.util.Shell.execCommand(Shell.java:1292)
at org.apache.hadoop.fs.FileUtil.execCommand(FileUtil.java:1118)
at org.apache.hadoop.fs.RawLocalFileSystem$DeprecatedRawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:693)
at org.apache.hadoop.fs.RawLocalFileSystem$DeprecatedRawLocalFileStatus.getPermission(RawLocalFileSystem.java:668)
at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:233)
at org.apache.hadoop.util.DiskChecker.checkDirInternal(DiskChecker.java:141)
at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:116)
at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:128)
at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:44)
at org.apache.hadoop.hdfs.server.datanode.checker.ThrottledAsyncChecker$1.call(ThrottledAsyncChecker.java:142)
at java.util.concurrent.FutureTask.run(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
at org.apache.hadoop.fs.RawLocalFileSystem$DeprecatedRawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:731)
at org.apache.hadoop.fs.RawLocalFileSystem$DeprecatedRawLocalFileStatus.getPermission(RawLocalFileSystem.java:668)
at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:233)
at org.apache.hadoop.util.DiskChecker.checkDirInternal(DiskChecker.java:141)
at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:116)
at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:128)
at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:44)
at org.apache.hadoop.hdfs.server.datanode.checker.ThrottledAsyncChecker$1.call(ThrottledAsyncChecker.java:142)
at java.util.concurrent.FutureTask.run(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
20/04/21 13:05:15 ERROR datanode.DataNode: Exception in secureMain
org.apache.hadoop.util.DiskChecker$DiskErrorException: Too many failed volumes - current valid volumes: 0, volumes configured: 1, volumes failed: 1, volume failures tolerated: 0
at org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker.check(StorageLocationChecker.java:217)
at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:2688)
at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:2598)
at org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode(DataNode.java:2645)
at org.apache.hadoop.hdfs.server.datanode.DataNode.secureMain(DataNode.java:2789)
at org.apache.hadoop.hdfs.server.datanode.DataNode.main(DataNode.java:2813)
20/04/21 13:05:15 INFO util.ExitUtil: Exiting with status 1: org.apache.hadoop.util.DiskChecker$DiskErrorException: Too many failed volumes - current valid volumes: 0, volumes configured: 1, volumes failed: 1, volume failures tolerated: 0
20/04/21 13:05:15 INFO datanode.DataNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down DataNode at trnd-5/10.128.0.9
************************************************************/
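The DataNode failure happens while Hadoop shells out (through winutils.exe) to read file permissions, and exit code -1073741701 is 0xC000007B, the Windows status for an executable or DLL that cannot be loaded (for example a 32/64-bit mismatch or a missing Visual C++ runtime). Running winutils directly against the data directory should reproduce this (a quick check, assuming the layout above):

:: ls is a built-in winutils subcommand; if the binary or its runtime
:: is broken, this fails with the same 0xC000007B before any output
%HADOOP_HOME%\bin\winutils.exe ls C:\Users\sudhakar4067\installs\hadoop-2.9.2\data\datanode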
What could be the solution to this?