I am setting up a multi-node Hadoop cluster on Docker containers.
When I run a sample MapReduce job, it fails with the error message below. Please help me resolve this issue.
Cluster information
OS: CentOS 6; Docker version: 18; Master: 1; Slaves: 3 (including one secondary master).
Sample job
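The job itself is one of the stock examples shipped with Hadoop; the launch command was along these lines (the exact examples jar version under /opt/hadoop is an assumption):

[hdfs@hadoop-master mapreduce]$ yarn jar /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 2 10

Aggregated logs for the failing application: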
[hdfs@hadoop-master mapreduce]$ yarn logs -applicationId application_1543106444393_0002
18/11/25 01:31:46 INFO client.RMProxy: Connecting to ResourceManager at hadoop-master/172.18.0.5:8032
Container: container_1543106444393_0002_02_000001 on hadoop-slave1_43242
==========================================================================
LogType:stderr
Log Upload Time:Sun Nov 25 01:30:36 +0000 2018
LogLength:88
Log Contents:
Error: Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster
End of LogType:stderr
LogType:stdout
Log Upload Time:Sun Nov 25 01:30:36 +0000 2018
LogLength:0
Log Contents:
End of LogType:stdout
Container: container_1543106444393_0002_01_000001 on hadoop-slave2_33444
==========================================================================
LogType:stderr
Log Upload Time:Sun Nov 25 01:30:37 +0000 2018
LogLength:88
Log Contents:
Error: Could not find or load main class org.apache.hadoop.mapreduce.v2.app.MRAppMaster
End of LogType:stderr
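Both ApplicationMaster attempts fail the same way, so the MRAppMaster class is apparently not on the classpath that the NodeManagers build for the AM container. One check that can be run from the Docker host is to print the classpath Hadoop resolves inside a slave container (the container name hadoop-slave1 is an assumption based on my hostnames):

docker exec -it hadoop-slave1 /opt/hadoop/bin/hadoop classpath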
Below are my configurations.
/opt/hadoop/etc/hadoop/hadoop-env.sh
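(This file mainly sets the install paths; a minimal sketch of the relevant exports, assuming the /opt/hadoop prefix and with the JDK path as a placeholder:)

export JAVA_HOME=/usr/lib/jvm/java        # placeholder; the actual JDK path in the image may differ
export HADOOP_HOME=/opt/hadoop
export HADOOP_CONF_DIR=/opt/hadoop/etc/hadoop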
/opt/hadoop/etc/hadoop/mapred-site.xml
[hdfs@hadoop-master hadoop]$ cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.application.classpath</name>
<value>
/opt/hadoop/etc/hadoop,
/opt/hadoop/etc/hadoop/share/hadoop/common/*,
/opt/hadoop/share/hadoop/common/lib/*,
/opt/hadoop/share/hadoop/hdfs/*,
/opt/hadoop/share/hadoop/hdfs/lib/*,
/opt/hadoop/share/hadoop/mapreduce/*,
/opt/hadoop/share/hadoop/mapreduce/lib/*,
/opt/hadoop/share/hadoop/yarn/*,
/opt/hadoop/share/hadoop/yarn/lib/*
</value>
</property>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop-master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop-master:19888</value>
</property>
<property>
<name>yarn.app.mapreduce.am.resource.mb</name>
<value>4000</value>
</property>
<property>
<name>yarn.app.mapreduce.am.command-opts</name>
<value>-Xmx3768m</value>
</property>
<property>
<name>mapreduce.map.cpu.vcores</name>
<value>2</value>
</property>
<property>
<name>mapreduce.reduce.cpu.vcores</name>
<value>2</value>
</property>
</configuration>
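Every entry in mapreduce.application.classpath above has to exist inside each container, because the ApplicationMaster can be scheduled on any slave; MRAppMaster itself ships in hadoop-mapreduce-client-app-<version>.jar under the mapreduce directory. A quick way to verify those jars are actually present on a slave (container name hadoop-slave1 assumed):

docker exec -it hadoop-slave1 ls /opt/hadoop/share/hadoop/mapreduce/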
/opt/hadoop/etc/hadoop/yarn-site.xml
[hdfs@hadoop-master hadoop]$ cat yarn-site.xml
<?xml version="1.0"?>
<!-- Site specific YARN configuration properties -->
<configuration>
<property>
<name>yarn.application.classpath</name>
<value>/opt/hadoop/etc/hadoop, /opt/hadoop/etc/hadoop/share/hadoop/common/*, /opt/hadoop/share/hadoop/common/lib/*, /opt/hadoop/share/hadoop/hdfs/*, /opt/hadoop/share/hadoop/hdfs/lib/*, /opt/hadoop/share/hadoop/mapreduce/*, /opt/hadoop/share/hadoop/mapreduce/lib/*, /opt/hadoop/share/hadoop/yarn/*, /opt/hadoop/share/hadoop/yarn/lib/*</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop-master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>4000</value>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>2</value>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>4000</value>
</property>
</configuration>
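To double-check which values the tools actually pick up from this configuration directory (a client-side view, assuming HADOOP_CONF_DIR points at /opt/hadoop/etc/hadoop), individual keys can be queried, for example:

[hdfs@hadoop-master hadoop]$ hdfs getconf -confKey yarn.application.classpath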