I want to implement a Spark Java application that performs a select with some filters on a table and joins the result with another table.
I created a demo application using RDDs, and it works fine in my local environment (local Spark and Cassandra).
However, when I deployed the application to a Spark cluster with a remote Cassandra database, it gave me this error:
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 17 in stage 1.0 failed 4 times, most recent failure: Lost task 17.3 in stage 1.0 (TID 64, 53.55.75.243, executor 3): java.io.IOException: org.apache.spark.SparkException: Failed to get broadcast_1_piece0 of broadcast_1
    at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1333)
    at org.apache.spark.broadcast.TorrentBroadcast.readBroadcastBlock(TorrentBroadcast.scala:207)
    at org.apache.spark.broadcast.TorrentBroadcast._value$lzycompute(TorrentBroadcast.scala:66)
    at org.apache.spark.broadcast.TorrentBroadcast._value(TorrentBroadcast.scala:66)
    at org.apache.spark.broadcast.TorrentBroadcast.getValue(TorrentBroadcast.scala:96)
    at org.apache.spark.broadcast.Broadcast.value(Broadcast.scala:70)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:84)
    at org.apache.spark.scheduler.Task.run(Task.scala:121)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Failed to get broadcast_1_piece0 of broadcast_1
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1.apply$mcVI$sp(TorrentBroadcast.scala:179)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1.apply(TorrentBroadcast.scala:151)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$org$apache$spark$broadcast$TorrentBroadcast$$readBlocks$1.apply(TorrentBroadcast.scala:151)
    at scala.collection.immutable.List.foreach(List.scala:392)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$readBroadcastBlock$1$$anonfun$apply$2.apply(TorrentBroadcast.scala:231)
    at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$readBroadcastBlock$1.apply(TorrentBroadcast.scala:211)
    at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1326)
    ... 13 more

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1889)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1876)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1876)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2059)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2048)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2126)
    at org.apache.spark.rdd.RDD.count(RDD.scala:1168)
    at org.apache.spark.api.java.AbstractJavaRDDLike.count(JavaRDDLike.scala:45)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:849)
    at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:167)
    at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
    at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:924)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:933)
Caused by: java.io.IOException: org.apache.spark.SparkException: Failed to get broadcast_1_piece0 of broadcast_1
    at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1333)
    at org.apache.spark.broadcast.TorrentBroadcast.readBroadcastBlock(TorrentBroadcast.scala:207)
    at org.apache.spark.broadcast.TorrentBroadcast._value(TorrentBroadcast.scala:66)
    at org.apache.spark.broadcast.TorrentBroadcast.getValue(TorrentBroadcast.scala:96)
    at org.apache.spark.broadcast.Broadcast.value(Broadcast.scala:70)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:84)
    at org.apache.spark.scheduler.Task.run(Task.scala:121)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
I'll attach the code and the configuration classes:
pom.xml
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>com.spark.example</groupId>
  <artifactId>spark-example</artifactId>
  <version>0.0.2</version>

  <properties>
    <java.version>1.8</java.version>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
    <spark-core.version>2.4.3</spark-core.version>
    <spark-sql.version>2.4.3</spark-sql.version>
    <spark-cassandra-connector.version>2.4.1</spark-cassandra-connector.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-sql_2.11</artifactId>
      <version>${spark-sql.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_2.11</artifactId>
      <version>${spark-core.version}</version>
    </dependency>
    <dependency>
      <groupId>com.datastax.spark</groupId>
      <artifactId>spark-cassandra-connector_2.11</artifactId>
      <version>${spark-cassandra-connector.version}</version>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.5.1</version>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>
DTO:
public class AirportCityDTO implements Serializable {
    private static final long serialVersionUID = 1L;
    private int id;
    private String name;
    private String city;

    public AirportCityDTO() {
        super();
    }

    public AirportCityDTO(int id, String name, String city) {
        super();
        this.id = id;
        this.name = name;
        this.city = city;
    }

    public int getId() { return id; }
    public void setId(int id) { this.id = id; }
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public String getCity() { return city; }
    public void setCity(String city) { this.city = city; }
}
public class AirportCityStateDTO implements Serializable {
    private static final long serialVersionUID = 1L;
    private String city;
    private String state;

    public AirportCityStateDTO() {
        super();
    }

    public AirportCityStateDTO(String city, String state) {
        super();
        this.city = city;
        this.state = state;
    }

    public String getCity() { return city; }
    public void setCity(String city) { this.city = city; }
    public String getState() { return state; }
    public void setState(String state) { this.state = state; }
}
public class AirportCityStateJoinDTO implements Serializable {
    private static final long serialVersionUID = 1L;
    private String name;
    private String city;
    private String state;

    public AirportCityStateJoinDTO() {
        super();
    }

    public AirportCityStateJoinDTO(String name, String city, String state) {
        super();
        this.name = name;
        this.city = city;
        this.state = state;
    }

    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public String getCity() { return city; }
    public void setCity(String city) { this.city = city; }
    public String getState() { return state; }
    public void setState(String state) { this.state = state; }
}
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.util.Properties;

import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SparkConfiguration implements Serializable {
    private static final long serialVersionUID = 1L;
    private static final Logger LOG = LoggerFactory.getLogger(SparkConfiguration.class);

    public SparkConfiguration() {
        super();
    }

    public SparkSession initializeSparkSession() {
        ClassLoader classLoader = this.getClass().getClassLoader();
        try (InputStream input = classLoader.getResourceAsStream("config.properties")) {
            Properties applicationProperties = new Properties();
            applicationProperties.load(input);
            String profile = applicationProperties.getProperty("application.profile");
            // Load the profile-specific properties file, e.g. dev-config.properties
            try (InputStream profileInput = classLoader.getResourceAsStream(profile.concat("-config.properties"))) {
                Properties properties = new Properties();
                properties.load(profileInput);
                return SparkSession.builder().appName("AppName").master(properties.getProperty("spark.master.url"))
                        .config("spark.cassandra.connection.ssl.enabled",
                                "dev".equals(profile.trim()) ? "true" : "false")
                        .config("spark.cassandra.auth.username", properties.getProperty("cassandra.configuration.user"))
                        .config("spark.cassandra.auth.password",
                                properties.getProperty("cassandra.configuration.password"))
                        .config("spark.cassandra.connection.host",
                                properties.getProperty("cassandra.configuration.contactPoint"))
                        .config("spark.cassandra.connection.port",
                                properties.getProperty("cassandra.configuration.port"))
                        .getOrCreate();
            } catch (IOException e) {
                LOG.error(e.getLocalizedMessage());
            }
        } catch (IOException e) {
            LOG.error(e.getLocalizedMessage());
        }
        return null;
    }
}
public class Main extends SparkConfiguration {
    private static final long serialVersionUID = 1L;
    private static final Logger LOG = LoggerFactory.getLogger(Main.class);

    public Main() {
        super();
    }

    public static void main(String[] args) {
        Main mainClass = new Main();
        final SparkSession sparkSession = mainClass.initializeSparkSession();
        AirportCityStatesRepository allocationRepo = new AirportCityStatesRepository();
        if (sparkSession != null) {
            allocationRepo.performJoin(sparkSession);
        } else {
            LOG.warn("Spark Session couldn't be initialized! Exiting program...");
        }
    }
}
public class AirportCityStatesRepository implements Serializable {
    private static final long serialVersionUID = 1L;
    private static final String CASSANDRA_KEYSPACE = ApplicationProperties.getInstance().getKeyspace();

    public AirportCityStatesRepository() {
        super();
    }

    public void performJoin(SparkSession sparkSession) {
        SparkContext sparkContext = sparkSession.sparkContext();
        JavaPairRDD<String, AirportCityDTO> airportCityPairRows = javaFunctions(sparkContext)
                .cassandraTable(CASSANDRA_KEYSPACE, "airport_city", mapRowTo(AirportCityDTO.class))
                .keyBy(new Function<AirportCityDTO, String>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public String call(AirportCityDTO v1) throws Exception {
                        return v1.getCity();
                    }
                });
        JavaPairRDD<String, AirportCityStateDTO> airportCityStatePairRows = javaFunctions(sparkContext)
                .cassandraTable(CASSANDRA_KEYSPACE, "airport_city_state", mapRowTo(AirportCityStateDTO.class))
                .keyBy(new Function<AirportCityStateDTO, String>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public String call(AirportCityStateDTO v1) throws Exception {
                        return v1.getCity();
                    }
                });
        JavaPairRDD<String, Tuple2<AirportCityDTO, AirportCityStateDTO>> joinRows = airportCityPairRows
                .join(airportCityStatePairRows);
        JavaRDD<AirportCityStateJoinDTO> finalResult = joinRows.map(
                new Function<Tuple2<String, Tuple2<AirportCityDTO, AirportCityStateDTO>>, AirportCityStateJoinDTO>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public AirportCityStateJoinDTO call(Tuple2<String, Tuple2<AirportCityDTO, AirportCityStateDTO>> v1)
                            throws Exception {
                        AirportCityStateJoinDTO obj = new AirportCityStateJoinDTO();
                        obj.setCity(v1._1());
                        obj.setName(v1._2()._1().getName());
                        obj.setState(v1._2()._2().getState());
                        return obj;
                    }
                });
        List<AirportCityStateJoinDTO> list = finalResult.collect();
        for (AirportCityStateJoinDTO l : list) {
            System.err.println(l.getName() + " | " + l.getCity() + " | " + l.getState());
        }
    }
}
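As mentioned at the top, the real application also applies some filters before the join. That step is not in the demo above; a minimal sketch of it, using the connector's where() to push the CQL predicate down to Cassandra (the column name and value here are illustrative, not the real schema):
// Hypothetical filter step; "state" and "CA" are placeholders for the real predicate.
JavaRDD<AirportCityStateDTO> filteredRows = javaFunctions(sparkContext)
        .cassandraTable(CASSANDRA_KEYSPACE, "airport_city_state", mapRowTo(AirportCityStateDTO.class))
        .where("state = ?", "CA");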
config.properties
# local, dev
application.profile=dev
local-config.properties
# Cassandra connection properties
cassandra.configuration.contactPoint=53.118.16.40
cassandra.configuration.keyspace=test
cassandra.configuration.port=9042
cassandra.consistency.level=ONE
cassandra.configuration.user=
cassandra.configuration.password=
spark.app.name=App Name
spark.master.url=local[2]
spark.master.port=7077
dev-config.properties
cassandra.configuration.contactPoint=53.55.79.246
cassandra.configuration.keyspace=test
cassandra.configuration.port=9042
cassandra.consistency.level=QUORUM
spark.app.name=App Name
spark.master.url=spark://53.55.75.246:7077
spark.master.port=7077
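One detail worth noting: cassandra.consistency.level is defined in both property files, but initializeSparkSession() never reads it, so it currently has no effect. If it were wired in, the builder chain would get one more line along these lines (a sketch, assuming the connector's spark.cassandra.input.consistency.level read setting is the intended target):
// Hypothetical addition to the SparkSession.builder() chain in initializeSparkSession():
.config("spark.cassandra.input.consistency.level",
        properties.getProperty("cassandra.consistency.level"))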
I managed to narrow down where the problem is. When I fetch the data from Cassandra into a JavaRDD, everything works fine.
The problem is that I then apply a transformation to the JavaRDD that holds the data from Cassandra; in my case, keyBy().
Without keyBy(), calling collect() or count() on the RDD worked fine on the Spark cluster with the remote Cassandra database as well (see the sketch below).
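To make the difference concrete, here is a minimal sketch of the two variants, reusing the javaFunctions/mapRowTo helpers and the keyspace constant from the repository class above:
// Works on the cluster: a plain read followed by an action, no transformation.
long cityCount = javaFunctions(sparkContext)
        .cassandraTable(CASSANDRA_KEYSPACE, "airport_city", mapRowTo(AirportCityDTO.class))
        .count();

// Fails on the cluster with "Failed to get broadcast_1_piece0 of broadcast_1":
// the same read, but with a keyBy() transformation before the action.
JavaPairRDD<String, AirportCityDTO> keyed = javaFunctions(sparkContext)
        .cassandraTable(CASSANDRA_KEYSPACE, "airport_city", mapRowTo(AirportCityDTO.class))
        .keyBy(dto -> dto.getCity());
long keyedCount = keyed.count();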
I would really appreciate some help with this. I have tried everything I could find, but none of the solutions worked.
I also installed a local Cassandra database on the Spark master node of the cluster, to try to reproduce the local environment I have on my development PC, but it raised the same error. Given that, I assume it is a Spark configuration problem.
The command used to launch the application:
/spark-2.4.3-bin-hadoop2.7/bin/spark-submit \
--class com.spark.example.Main \
--master spark://53.55.75.246:7077 \
--executor-memory 1G \
--executor-cores 5 \
--conf spark.executor.userClassPathFirst=true \
--conf spark.driver.userClassPathFirst=true \
--verbose \
spark-example.jar