Spark job throwing a NullPointerException when reading AVRO data
0 votes
/ 08 November 2019

The Spark job throws a NullPointerException when reading the data. I take Avro data and join it with another dataset, and I get this error:

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:2039)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2027)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2026)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2026)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:966)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:966)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:966)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2260)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2209)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2198)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:777)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:365)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3384)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2545)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2545)
    at org.apache.spark.sql.Dataset$$anonfun$53.apply(Dataset.scala:3365)
    at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3364)
    at org.apache.spark.sql.Dataset.head(Dataset.scala:2545)
    at org.apache.spark.sql.Dataset.take(Dataset.scala:2759)
    at org.apache.spark.sql.Dataset.getRows(Dataset.scala:255)
    at org.apache.spark.sql.Dataset.showString(Dataset.scala:292)
    at org.apache.spark.sql.Dataset.show(Dataset.scala:748)
    at org.apache.spark.sql.Dataset.show(Dataset.scala:725)
    at Test$.main(Test.scala:112)
    at Test.main(Test.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
    at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:849)
    at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:167)
    at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:195)
    at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
    at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:924)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:933)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

Caused by: java.lang.NullPointerException
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.supportedCategories(AvroObjectInspectorGenerator.java:142)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:91)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.createObjectInspectorWorker(AvroObjectInspectorGenerator.java:104)
    at org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator.<init>(AvroObjectInspectorGenerator.java:56)
    at org.apache.hadoop.hive.serde2.avro.AvroSerDe.initialize(AvroSerDe.java:124)
    at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$5$$anonfun$10.apply(TableReader.scala:258)
    at org.apache.spark.sql.hive.HadoopTableReader$$anonfun$5$$anonfun$10.apply(TableReader.scala:246)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:801)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:801)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.UnionRDD.compute(UnionRDD.scala:105)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.run(Task.scala:121)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:402)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:408)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
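
The failing code at Test.scala:112 is not quoted here; judging by the HadoopTableReader and AvroSerDe frames, the Avro data is read through a Hive table rather than spark.read.format("avro"). A minimal sketch of that kind of read-and-join, with placeholder table and column names:

    import org.apache.spark.sql.SparkSession

    object Test {
      def main(args: Array[String]): Unit = {
        // Hive support is what routes the read through HadoopTableReader/AvroSerDe,
        // as seen in the "Caused by" trace above
        val spark = SparkSession.builder()
          .appName("AvroJoinTest")
          .enableHiveSupport()
          .getOrCreate()

        // Placeholder names -- the real code at Test.scala:112 is not shown
        val avroDf  = spark.table("db.avro_backed_table") // Hive table stored as AVRO
        val otherDf = spark.table("db.other_table")       // the dataset it is joined with

        // show() is the action that triggers the failing job (Dataset.show in the driver trace)
        avroDf.join(otherDf, Seq("id")).show()
      }
    }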

And these are my sbt dependencies:

libraryDependencies ++=  Seq(
  "org.apache.spark" %% "spark-core" % sparkVersion % "provided" exclude("org.apache.avro", "avro"),
  "org.apache.spark" % "spark-sql_2.11" % "2.3.0"
)

libraryDependencies += "org.apache.spark" %% "spark-avro" % "2.4.4"
libraryDependencies += "org.apache.avro" % "avro" % "1.7.4"

The same snippet works fine in spark-shell.
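
For comparison, spark-shell runs against the single, mutually consistent set of jars bundled with the Spark installation, which may be why the same code behaves differently there. The working shell session presumably looks something like this (placeholder names again):

    $ spark-shell
    scala> val avroDf = spark.table("db.avro_backed_table")
    scala> avroDf.join(spark.table("db.other_table"), Seq("id")).show()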

...