Hadoop "Too many failed volumes" error while running command to get file permissions: ExitCodeException exitCode = -1073741515
2 votes
/ May 5, 2020

When I start DFS on my Windows machine, I get this error:

-- file path: C:/hadoop/data/datanode
2020-05-05 11:44:59,230 WARN checker.StorageLocationChecker: Exception checking StorageLocation [DISK]file:/C:/hadoop/data/datanode
java.lang.RuntimeException: Error while running command to get file permissions : ExitCodeException exitCode=-1073741515:
    at org.apache.hadoop.util.Shell.runCommand(Shell.java:1009)
    at org.apache.hadoop.util.Shell.run(Shell.java:902)
    at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1227)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:1321)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:1303)
    at org.apache.hadoop.fs.FileUtil.execCommand(FileUtil.java:1343)
    at org.apache.hadoop.fs.RawLocalFileSystem$DeprecatedRawLocalFileStatus.loadPermissionInfoByNonNativeIO(RawLocalFileSystem.java:726)
    at org.apache.hadoop.fs.RawLocalFileSystem$DeprecatedRawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:717)
    at org.apache.hadoop.fs.RawLocalFileSystem$DeprecatedRawLocalFileStatus.getPermission(RawLocalFileSystem.java:678)
    at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:233)
    at org.apache.hadoop.util.DiskChecker.checkDirInternal(DiskChecker.java:141)
    at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:116)
    at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:239)
    at org.apache.hadoop.hdfs.server.datanode.StorageLocation.check(StorageLocation.java:52)
    at org.apache.hadoop.hdfs.server.datanode.checker.ThrottledAsyncChecker$1.call(ThrottledAsyncChecker.java:142)
    at com.google.common.util.concurrent.TrustedListenableFutureTask$TrustedFutureInterruptibleTask.runInterruptibly(TrustedListenableFutureTask.java:125)
    at com.google.common.util.concurrent.InterruptibleTask.run(InterruptibleTask.java:57)
    at com.google.common.util.concurrent.TrustedListenableFutureTask.run(TrustedListenableFutureTask.java:78)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:748)
2020-05-05 11:44:59,243 ERROR datanode.DataNode: Exception in secureMain
org.apache.hadoop.util.DiskChecker$DiskErrorException: Too many failed volumes - current valid volumes: 0, volumes configured: 1, volumes failed: 1, volume failures tolerated: 0
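
One detail that may help: the exit code is a Windows NTSTATUS value printed as a signed 32-bit integer. A minimal Java snippet (my own check, not part of the Hadoop output) decodes it:

    public class DecodeExitCode {
        public static void main(String[] args) {
            int exitCode = -1073741515; // from the ExitCodeException above
            // Prints "c0000135", i.e. NTSTATUS STATUS_DLL_NOT_FOUND on Windows
            System.out.println(Integer.toHexString(exitCode));
        }
    }

If I read that correctly, winutils.exe (which Hadoop shells out to for file permissions on Windows) fails to start because a DLL it depends on is missing, possibly a Microsoft Visual C++ runtime, though that is only my guess.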

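For reference, the storage directory comes from my hdfs-site.xml, which looks roughly like this (a sketch of a standard single-node setup; note that dfs.datanode.failed.volumes.tolerated defaults to 0, matching "volume failures tolerated: 0" in the error):

    <configuration>
      <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///C:/hadoop/data/datanode</value>
      </property>
      <!-- Default is 0: a single failed volume stops the DataNode. -->
      <property>
        <name>dfs.datanode.failed.volumes.tolerated</name>
        <value>0</value>
      </property>
    </configuration>
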
Has anyone encountered this problem?

...