How to create an HDFS microservice with the namenode directory written to an external hard drive
April 9, 2020

Situation: I am trying to figure out how to add an external volume to minikube, and so far I have managed to do it with minikube mount --ip $myip $externalvolumepath:$externalvolumepathinsideminikube, combined with a hostPath PersistentVolume pointing at path: $externalvolumepathinsideminikube.
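For reference, the mount invocation looks roughly like this (a sketch with placeholder paths; $myip is the host address reachable from the minikube VM, and the trailing & keeps the mount process alive in the background):

# Run on the host before creating the PersistentVolume.
minikube mount --ip $myip $externalvolumepath:$externalvolumepathinsideminikube &

Here is an example manifest used to declare the PersistentVolume, the PersistentVolumeClaim, and the Deployment: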

kind: PersistentVolume
apiVersion: v1
metadata:
  name: files-archive-volume
  labels:
    type: local
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    path: $externalvolumepathinsideminikube

---

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: files-archive-volumeclaim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ""
  volumeName: files-archive-volume
  resources:
    requests:
      storage: 5Gi

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: hdfs
spec:
  selector:
    matchLabels:
      run: hdfs
  replicas: 1
  template:
    metadata:
      labels:
        run: hdfs
    spec:
      hostname: hdfs
      volumes:
#      - name: "files-archive-volumeclaim"
#        hostPath:
#          path: "/external/files/archive/"
#      - name: "files-tmp-volumeclaim"
#        hostPath:
#          path: "/external/files/tmp/"            
      - name: files-archive-volumeclaim
        persistentVolumeClaim:
          claimName: files-archive-volumeclaim
      - name: files-tmp-volumeclaim
        persistentVolumeClaim:
          claimName: files-tmp-volumeclaim
      - name: config-hdfs-core-site
        configMap:
          name: config-hdfs-core-site
      - name: config-hdfs-hadoop-env
        configMap:
          name: config-hdfs-hadoop-env
      - name: config-hdfs-hdfs-site
        configMap:
          name: config-hdfs-hdfs-site     
      - name: config-hdfs-mapred-site
        configMap:
          name: config-hdfs-mapred-site 
      - name: config-hdfs-yarn-site
        configMap:
          name: config-hdfs-yarn-site
      - name: config-sshd-config
        configMap:
          name: config-sshd-config
      - name: config-ssh-config
        configMap:
          name: config-ssh-config           
      - name: init-hdfs
        configMap:
          name: init-hdfs 
      containers:      
      - name: hdfs
        image: hdfs:mar2020
        env:
          - name: PROTOCOLE
            value: "init"
        resources:
          limits:
            cpu: "1"  
        ports:
        - containerPort: 22
        - containerPort: 2122
        - containerPort: 8030
        - containerPort: 8031
        - containerPort: 8032
        - containerPort: 8033
        - containerPort: 8040
        - containerPort: 8042
        - containerPort: 8088
        - containerPort: 9000
        - containerPort: 19888
        - containerPort: 49707
        - containerPort: 50010
        - containerPort: 50020
        - containerPort: 50070
        - containerPort: 50075
        - containerPort: 50090
        volumeMounts:
        - mountPath: /home/hadoop/
          name: files-archive-volumeclaim
        - mountPath: /tmp/
          name: files-tmp-volumeclaim
        - mountPath: /usr/local/hadoop/etc/hadoop/core-site.xml
          subPath: core-site.xml
          name: config-hdfs-core-site
        - mountPath: /usr/local/hadoop/etc/hadoop/hadoop-env.sh
          subPath: hadoop-env.sh
          name: config-hdfs-hadoop-env
        - mountPath: /usr/local/hadoop/etc/hadoop/hdfs-site.xml
          subPath: hdfs-site.xml
          name: config-hdfs-hdfs-site
        - mountPath: /usr/local/hadoop/etc/hadoop/mapred-site.xml
          subPath: mapred-site.xml
          name: config-hdfs-mapred-site
        - mountPath: /usr/local/hadoop/etc/hadoop/yarn-site.xml
          subPath: yarn-site.xml
          name: config-hdfs-yarn-site
        - mountPath: /etc/ssh/sshd_config
          subPath: sshd_config
          name: config-sshd-config
        - mountPath: /etc/ssh/ssh_config
          subPath: ssh_config
          name: config-ssh-config
        - mountPath: /home/init-hdfs.sh
          subPath: init-hdfs.sh
          name: init-hdfs
        command: ["tail", "-f" , "/dev/null"]
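Before initializing anything inside the pod, it is worth checking that the claim is bound and how the external mount actually shows up in the container, since Hadoop later inspects it with df (a sketch; the pod name comes from the Deployment's run=hdfs label):

# Verify the binding, then look at the mounted filesystem from inside the pod.
kubectl get pv files-archive-volume
kubectl get pvc files-archive-volumeclaim
POD=$(kubectl get pods -l run=hdfs -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$POD" -- df -k /home/hadoop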

In this deployment I am using a custom image with HDFS installed. The Dockerfile:

FROM ubuntu:18.04

### Ubuntu with some basic tools
RUN apt-get update \
&& apt-get install -y curl git unzip wget openssh-server 

RUN echo "root:dummypassword" | chpasswd



#RUN mkdir /home/hadoop/.ssh/ \
#&& ssh-keygen -b 2048 -t rsa -f /home/hadoop/.ssh/id_rsa -q -N "" \
#&& cat /home/hadoop/.ssh/id_rsa.pub >> root/.ssh/authorized_keys \
#&& ssh localhost


### Java 8 
RUN apt update \
&& apt install -y openjdk-8-jdk openjdk-8-jre

ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
ENV JRE_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre
ENV PATH=$PATH:$JAVA_HOME/bin

### Hadoop
RUN  wget http://apache.claz.org/hadoop/common/hadoop-2.10.0/hadoop-2.10.0.tar.gz \
&& tar xzf hadoop-2.10.0.tar.gz \
&& mv hadoop-2.10.0 /usr/local/hadoop/


ENV HADOOP_HOME=/usr/local/hadoop 
ENV HADOOP_MAPRED_HOME=$HADOOP_HOME 
ENV HADOOP_COMMON_HOME=$HADOOP_HOME
ENV HADOOP_HDFS_HOME=$HADOOP_HOME
ENV YARN_HOME=$HADOOP_HOME
ENV HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
ENV HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native" 
ENV PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

COPY init-hdfs.sh /home/init-hdfs.sh
COPY ssh_config /etc/ssh/ssh_config
COPY sshd_config /etc/ssh/sshd_config
COPY yarn-site.xml /usr/local/hadoop/etc/hadoop/yarn-site.xml
COPY mapred-site.xml /usr/local/hadoop/etc/hadoop/mapred-site.xml
COPY hdfs-site.xml /usr/local/hadoop/etc/hadoop/hdfs-site.xml
COPY hadoop-env.sh /usr/local/hadoop/etc/hadoop/hadoop-env.sh
COPY core-site.xml /usr/local/hadoop/etc/hadoop/core-site.xml


RUN /etc/init.d/ssh restart \
&& ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N "" \
&& cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys \
&& ssh-keyscan 'localhost (127.0.0.1)' >> /root/.ssh/known_hosts \
&& ssh-keyscan 'localhost' >> /root/.ssh/known_hosts \
&& ssh-keyscan '0.0.0.0' >> /root/.ssh/known_hosts 
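Since the Deployment references the local tag hdfs:mar2020, one way to make the image visible to minikube without a registry is to build it against minikube's Docker daemon (a sketch, assuming the Dockerfile and the copied config files sit in the current directory):

# Point the local Docker client at minikube's daemon, then build the image.
eval $(minikube docker-env)
docker build -t hdfs:mar2020 .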

To initialize the microservice, I first start ssh and format the namenode. Up to this point everything works fine (no error message, and the microservice can write a file/folder to the external volume):

/etc/init.d/ssh start
echo Y | hdfs namenode -format 


20/04/09 07:29:19 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
20/04/09 07:29:19 INFO namenode.NameNode: createNameNode [-format]
Formatting using clusterid: CID-e90fa19a-af02-4995-90c9-2e5e8b48e82f
20/04/09 07:29:20 INFO namenode.FSEditLog: Edit logging is async:true
20/04/09 07:29:20 INFO namenode.FSNamesystem: KeyProvider: null
20/04/09 07:29:20 INFO namenode.FSNamesystem: fsLock is fair: true
20/04/09 07:29:20 INFO namenode.FSNamesystem: Detailed lock hold time metrics enabled: false
20/04/09 07:29:20 INFO namenode.FSNamesystem: fsOwner             = root (auth:SIMPLE)
20/04/09 07:29:20 INFO namenode.FSNamesystem: supergroup          = supergroup
20/04/09 07:29:20 INFO namenode.FSNamesystem: isPermissionEnabled = true
20/04/09 07:29:20 INFO namenode.FSNamesystem: HA Enabled: false
20/04/09 07:29:20 INFO common.Util: dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
20/04/09 07:29:20 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit: configured=1000, counted=60, effected=1000
20/04/09 07:29:20 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
20/04/09 07:29:20 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
20/04/09 07:29:20 INFO blockmanagement.BlockManager: The block deletion will start around 2020 Apr 09 07:29:20
20/04/09 07:29:20 INFO util.GSet: Computing capacity for map BlocksMap
20/04/09 07:29:20 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:20 INFO util.GSet: 2.0% max memory 966.7 MB = 19.3 MB
20/04/09 07:29:20 INFO util.GSet: capacity      = 2^21 = 2097152 entries
20/04/09 07:29:20 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
20/04/09 07:29:20 WARN conf.Configuration: No unit for dfs.heartbeat.interval(3) assuming SECONDS
20/04/09 07:29:20 WARN conf.Configuration: No unit for dfs.namenode.safemode.extension(30000) assuming MILLISECONDS
20/04/09 07:29:20 INFO blockmanagement.BlockManagerSafeMode: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
20/04/09 07:29:20 INFO blockmanagement.BlockManagerSafeMode: dfs.namenode.safemode.min.datanodes = 0
20/04/09 07:29:20 INFO blockmanagement.BlockManagerSafeMode: dfs.namenode.safemode.extension = 30000
20/04/09 07:29:20 INFO blockmanagement.BlockManager: defaultReplication         = 1
20/04/09 07:29:20 INFO blockmanagement.BlockManager: maxReplication             = 512
20/04/09 07:29:20 INFO blockmanagement.BlockManager: minReplication             = 1
20/04/09 07:29:20 INFO blockmanagement.BlockManager: maxReplicationStreams      = 2
20/04/09 07:29:20 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
20/04/09 07:29:20 INFO blockmanagement.BlockManager: encryptDataTransfer        = false
20/04/09 07:29:20 INFO blockmanagement.BlockManager: maxNumBlocksToLog          = 1000
20/04/09 07:29:20 INFO namenode.FSNamesystem: Append Enabled: true
20/04/09 07:29:20 INFO namenode.FSDirectory: GLOBAL serial map: bits=24 maxEntries=16777215
20/04/09 07:29:20 INFO util.GSet: Computing capacity for map INodeMap
20/04/09 07:29:20 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:20 INFO util.GSet: 1.0% max memory 966.7 MB = 9.7 MB
20/04/09 07:29:20 INFO util.GSet: capacity      = 2^20 = 1048576 entries
20/04/09 07:29:20 INFO namenode.FSDirectory: ACLs enabled? false
20/04/09 07:29:20 INFO namenode.FSDirectory: XAttrs enabled? true
20/04/09 07:29:20 INFO namenode.NameNode: Caching file names occurring more than 10 times
20/04/09 07:29:20 INFO snapshot.SnapshotManager: Loaded config captureOpenFiles: falseskipCaptureAccessTimeOnlyChange: false
20/04/09 07:29:20 INFO util.GSet: Computing capacity for map cachedBlocks
20/04/09 07:29:20 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:20 INFO util.GSet: 0.25% max memory 966.7 MB = 2.4 MB
20/04/09 07:29:20 INFO util.GSet: capacity      = 2^18 = 262144 entries
20/04/09 07:29:20 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
20/04/09 07:29:20 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
20/04/09 07:29:20 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
20/04/09 07:29:20 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
20/04/09 07:29:20 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
20/04/09 07:29:20 INFO util.GSet: Computing capacity for map NameNodeRetryCache
20/04/09 07:29:20 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:20 INFO util.GSet: 0.029999999329447746% max memory 966.7 MB = 297.0 KB
20/04/09 07:29:20 INFO util.GSet: capacity      = 2^15 = 32768 entries
20/04/09 07:29:20 INFO namenode.FSImage: Allocated new BlockPoolId: BP-723004653-172.17.0.8-1586417360741
20/04/09 07:29:20 INFO common.Storage: Storage directory /home/hadoop/hadoopinfra/hdfs/namenode has been successfully formatted.
20/04/09 07:29:20 INFO namenode.FSImageFormatProtobuf: Saving image file /home/hadoop/hadoopinfra/hdfs/namenode/current/fsimage.ckpt_0000000000000000000 using no compression
20/04/09 07:29:21 INFO namenode.FSImageFormatProtobuf: Image file /home/hadoop/hadoopinfra/hdfs/namenode/current/fsimage.ckpt_0000000000000000000 of size 323 bytes saved in 0 seconds .
20/04/09 07:29:21 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
20/04/09 07:29:21 INFO namenode.FSImage: FSImageSaver clean checkpoint: txid = 0 when meet shutdown.
20/04/09 07:29:21 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at hdfs/172.17.0.8

The problem arises when I start HDFS (start-dfs.sh && start-yarn.sh). The shell prints no errors, but no Java processes show up under the jps command. Moreover, any $HADOOP_HOME/bin/hadoop fs -mkdir /tmp returns an error saying that the connection from hdfs/<the container IP> to localhost:9000 failed. Digging a little further, I finally realized that the namenode is not running, so I used the hdfs namenode command to start the namenode alone.
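The sequence that exposes the failure, summarized (paths follow the environment variables set in the Dockerfile):

# Start the daemons, then check what is actually running.
start-dfs.sh && start-yarn.sh
jps                                      # no NameNode or DataNode listed
$HADOOP_HOME/bin/hadoop fs -mkdir /tmp   # fails: cannot connect to localhost:9000
hdfs namenode                            # run the namenode in the foreground

The foreground run prints the following: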

20/04/09 07:29:49 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
20/04/09 07:29:49 INFO namenode.NameNode: createNameNode []
20/04/09 07:29:49 INFO impl.MetricsConfig: loaded properties from hadoop-metrics2.properties
20/04/09 07:29:49 INFO impl.MetricsSystemImpl: Scheduled Metric snapshot period at 10 second(s).
20/04/09 07:29:49 INFO impl.MetricsSystemImpl: NameNode metrics system started
20/04/09 07:29:49 INFO namenode.NameNode: fs.defaultFS is hdfs://localhost:9000
20/04/09 07:29:49 INFO namenode.NameNode: Clients are to use localhost:9000 to access this namenode/service.
20/04/09 07:29:50 INFO util.JvmPauseMonitor: Starting JVM pause monitor
20/04/09 07:29:50 INFO hdfs.DFSUtil: Starting Web-server for hdfs at: http://0.0.0.0:50070
20/04/09 07:29:50 INFO mortbay.log: Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog
20/04/09 07:29:50 INFO server.AuthenticationFilter: Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
20/04/09 07:29:50 INFO http.HttpRequestLog: Http request log for http.requests.namenode is not defined
20/04/09 07:29:50 INFO http.HttpServer2: Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
20/04/09 07:29:50 INFO http.HttpServer2: Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context hdfs
20/04/09 07:29:50 INFO http.HttpServer2: Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context logs
20/04/09 07:29:50 INFO http.HttpServer2: Added filter static_user_filter (class=org.apache.hadoop.http.lib.StaticUserWebFilter$StaticUserFilter) to context static
20/04/09 07:29:50 INFO http.HttpServer2: Added filter 'org.apache.hadoop.hdfs.web.AuthFilter' (class=org.apache.hadoop.hdfs.web.AuthFilter)
20/04/09 07:29:50 INFO http.HttpServer2: addJerseyResourcePackage: packageName=org.apache.hadoop.hdfs.server.namenode.web.resources;org.apache.hadoop.hdfs.web.resources, pathSpec=/webhdfs/v1/*
20/04/09 07:29:50 INFO http.HttpServer2: Jetty bound to port 50070
20/04/09 07:29:50 INFO mortbay.log: jetty-6.1.26
20/04/09 07:29:50 INFO mortbay.log: Started HttpServer2$SelectChannelConnectorWithSafeStartup@0.0.0.0:50070
20/04/09 07:29:50 WARN namenode.FSNamesystem: Only one image storage directory (dfs.namenode.name.dir) configured. Beware of data loss due to lack of redundant storage directories!
20/04/09 07:29:50 WARN namenode.FSNamesystem: Only one namespace edits storage directory (dfs.namenode.edits.dir) configured. Beware of data loss due to lack of redundant storage directories!
20/04/09 07:29:51 INFO namenode.FSEditLog: Edit logging is async:true
20/04/09 07:29:51 INFO namenode.FSNamesystem: KeyProvider: null
20/04/09 07:29:51 INFO namenode.FSNamesystem: fsLock is fair: true
20/04/09 07:29:51 INFO namenode.FSNamesystem: Detailed lock hold time metrics enabled: false
20/04/09 07:29:51 INFO namenode.FSNamesystem: fsOwner             = root (auth:SIMPLE)
20/04/09 07:29:51 INFO namenode.FSNamesystem: supergroup          = supergroup
20/04/09 07:29:51 INFO namenode.FSNamesystem: isPermissionEnabled = true
20/04/09 07:29:51 INFO namenode.FSNamesystem: HA Enabled: false
20/04/09 07:29:51 INFO common.Util: dfs.datanode.fileio.profiling.sampling.percentage set to 0. Disabling file IO profiling
20/04/09 07:29:51 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit: configured=1000, counted=60, effected=1000
20/04/09 07:29:51 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true
20/04/09 07:29:51 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000
20/04/09 07:29:51 INFO blockmanagement.BlockManager: The block deletion will start around 2020 Apr 09 07:29:51
20/04/09 07:29:51 INFO util.GSet: Computing capacity for map BlocksMap
20/04/09 07:29:51 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:51 INFO util.GSet: 2.0% max memory 966.7 MB = 19.3 MB
20/04/09 07:29:51 INFO util.GSet: capacity      = 2^21 = 2097152 entries
20/04/09 07:29:51 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false
20/04/09 07:29:51 WARN conf.Configuration: No unit for dfs.heartbeat.interval(3) assuming SECONDS
20/04/09 07:29:51 WARN conf.Configuration: No unit for dfs.namenode.safemode.extension(30000) assuming MILLISECONDS
20/04/09 07:29:51 INFO blockmanagement.BlockManagerSafeMode: dfs.namenode.safemode.threshold-pct = 0.9990000128746033
20/04/09 07:29:51 INFO blockmanagement.BlockManagerSafeMode: dfs.namenode.safemode.min.datanodes = 0
20/04/09 07:29:51 INFO blockmanagement.BlockManagerSafeMode: dfs.namenode.safemode.extension = 30000
20/04/09 07:29:51 INFO blockmanagement.BlockManager: defaultReplication         = 1
20/04/09 07:29:51 INFO blockmanagement.BlockManager: maxReplication             = 512
20/04/09 07:29:51 INFO blockmanagement.BlockManager: minReplication             = 1
20/04/09 07:29:51 INFO blockmanagement.BlockManager: maxReplicationStreams      = 2
20/04/09 07:29:51 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000
20/04/09 07:29:51 INFO blockmanagement.BlockManager: encryptDataTransfer        = false
20/04/09 07:29:51 INFO blockmanagement.BlockManager: maxNumBlocksToLog          = 1000
20/04/09 07:29:51 INFO namenode.FSNamesystem: Append Enabled: true
20/04/09 07:29:51 INFO namenode.FSDirectory: GLOBAL serial map: bits=24 maxEntries=16777215
20/04/09 07:29:51 INFO util.GSet: Computing capacity for map INodeMap
20/04/09 07:29:51 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:51 INFO util.GSet: 1.0% max memory 966.7 MB = 9.7 MB
20/04/09 07:29:51 INFO util.GSet: capacity      = 2^20 = 1048576 entries
20/04/09 07:29:51 INFO namenode.FSDirectory: ACLs enabled? false
20/04/09 07:29:51 INFO namenode.FSDirectory: XAttrs enabled? true
20/04/09 07:29:51 INFO namenode.NameNode: Caching file names occurring more than 10 times
20/04/09 07:29:51 INFO snapshot.SnapshotManager: Loaded config captureOpenFiles: falseskipCaptureAccessTimeOnlyChange: false
20/04/09 07:29:51 INFO util.GSet: Computing capacity for map cachedBlocks
20/04/09 07:29:51 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:51 INFO util.GSet: 0.25% max memory 966.7 MB = 2.4 MB
20/04/09 07:29:51 INFO util.GSet: capacity      = 2^18 = 262144 entries
20/04/09 07:29:51 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.window.num.buckets = 10
20/04/09 07:29:51 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.num.users = 10
20/04/09 07:29:51 INFO metrics.TopMetrics: NNTop conf: dfs.namenode.top.windows.minutes = 1,5,25
20/04/09 07:29:51 INFO namenode.FSNamesystem: Retry cache on namenode is enabled
20/04/09 07:29:51 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis
20/04/09 07:29:51 INFO util.GSet: Computing capacity for map NameNodeRetryCache
20/04/09 07:29:51 INFO util.GSet: VM type       = 64-bit
20/04/09 07:29:51 INFO util.GSet: 0.029999999329447746% max memory 966.7 MB = 297.0 KB
20/04/09 07:29:51 INFO util.GSet: capacity      = 2^15 = 32768 entries
20/04/09 07:29:51 INFO common.Storage: Lock on /home/hadoop/hadoopinfra/hdfs/namenode/in_use.lock acquired by nodename 86@hdfs
20/04/09 07:29:51 INFO namenode.FileJournalManager: Recovering unfinalized segments in /home/hadoop/hadoopinfra/hdfs/namenode/current
20/04/09 07:29:51 INFO namenode.FSImage: No edit log streams selected.
20/04/09 07:29:51 INFO namenode.FSImage: Planning to load image: FSImageFile(file=/home/hadoop/hadoopinfra/hdfs/namenode/current/fsimage_0000000000000000000, cpktTxId=0000000000000000000)
20/04/09 07:29:51 INFO namenode.FSImageFormatPBINode: Loading 1 INodes.
20/04/09 07:29:51 INFO namenode.FSImageFormatPBINode: Successfully loaded 1 inodes
20/04/09 07:29:51 INFO namenode.FSImageFormatProtobuf: Loaded FSImage in 0 seconds.
20/04/09 07:29:51 INFO namenode.FSImage: Loaded image for txid 0 from /home/hadoop/hadoopinfra/hdfs/namenode/current/fsimage_0000000000000000000
20/04/09 07:29:51 INFO namenode.FSNamesystem: Need to save fs image? false (staleImage=false, haEnabled=false, isRollingUpgrade=false)
20/04/09 07:29:51 INFO namenode.FSEditLog: Starting log segment at 1
20/04/09 07:29:52 INFO namenode.NameCache: initialized with 0 entries 0 lookups
20/04/09 07:29:52 INFO namenode.FSNamesystem: Finished loading FSImage in 795 msecs
20/04/09 07:29:52 INFO namenode.NameNode: RPC server is binding to localhost:9000
20/04/09 07:29:52 INFO namenode.NameNode: Enable NameNode state context:false
20/04/09 07:29:52 INFO ipc.CallQueueManager: Using callQueue: class java.util.concurrent.LinkedBlockingQueue queueCapacity: 1000 scheduler: class org.apache.hadoop.ipc.DefaultRpcScheduler
20/04/09 07:29:52 INFO ipc.Server: Starting Socket Reader #1 for port 9000
20/04/09 07:29:52 INFO namenode.FSNamesystem: Registered FSNamesystemState MBean
20/04/09 07:29:52 INFO namenode.FSNamesystem: Stopping services started for active state
20/04/09 07:29:52 INFO namenode.FSEditLog: Ending log segment 1, 1
20/04/09 07:29:52 INFO namenode.FSEditLog: Number of transactions: 2 Total time for transactions(ms): 1 Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 327
20/04/09 07:29:52 INFO namenode.FileJournalManager: Finalizing edits file /home/hadoop/hadoopinfra/hdfs/namenode/current/edits_inprogress_0000000000000000001 -> /home/hadoop/hadoopinfra/hdfs/namenode/current/edits_0000000000000000001-0000000000000000002
20/04/09 07:29:52 INFO namenode.FSEditLog: FSEditLogAsync was interrupted, exiting
20/04/09 07:29:52 INFO ipc.Server: Stopping server on 9000
20/04/09 07:29:52 INFO namenode.FSNamesystem: Stopping services started for active state
20/04/09 07:29:52 INFO namenode.FSNamesystem: Stopping services started for standby state
20/04/09 07:29:52 INFO mortbay.log: Stopped HttpServer2$SelectChannelConnectorWithSafeStartup@0.0.0.0:50070
20/04/09 07:29:52 INFO impl.MetricsSystemImpl: Stopping NameNode metrics system...
20/04/09 07:29:52 INFO impl.MetricsSystemImpl: NameNode metrics system stopped.
20/04/09 07:29:52 INFO impl.MetricsSystemImpl: NameNode metrics system shutdown complete.
20/04/09 07:29:52 ERROR namenode.NameNode: Failed to start namenode.
java.io.IOException: Could not parse line: 192.168.1.XX             0     0         0        - /home/hadoop
        at org.apache.hadoop.fs.DF.parseOutput(DF.java:195)
        at org.apache.hadoop.fs.DF.getFilesystem(DF.java:76)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker$CheckedVolume.<init>(NameNodeResourceChecker.java:69)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.addDirToCheck(NameNodeResourceChecker.java:165)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.<init>(NameNodeResourceChecker.java:134)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startCommonServices(FSNamesystem.java:1134)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.startCommonServices(NameNode.java:816)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:755)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:961)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:940)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1714)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1782)
20/04/09 07:29:52 INFO util.ExitUtil: Exiting with status 1: java.io.IOException: Could not parse line: 192.168.1.XX             0     0         0        - /home/hadoop
20/04/09 07:29:52 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at hdfs/172.17.0.8

The returned exception, "java.io.IOException: Could not parse line: 192.168.1.XX 0 0 0 - /home/hadoop", puzzled me, because 192.168.1.XX is my computer's IP. I then tried deploying the pods with a hostPath volume pointing at a folder on the minikube hard drive, and the namenode works fine. How can I run the namenode on an external volume in Kubernetes without running into this problem? Am I mounting the volume into minikube correctly? Is this a problem on the HDFS side?
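One way to see exactly what Hadoop is choking on is to reproduce its check by hand: NameNodeResourceChecker uses org.apache.hadoop.fs.DF, which shells out to df -k on the dfs.namenode.name.dir directory and parses the columns of the output. A sketch to run inside the container, using the namenode directory from the logs:

# Reproduce (roughly) the df call Hadoop makes internally. If the mounted
# filesystem exported from 192.168.1.XX reports non-numeric fields, for
# example zero sizes and a bare "-" in the capacity column as in the error
# above, the parser throws exactly this IOException.
df -k /home/hadoop/hadoopinfra/hdfs/namenode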
