Error when running a Structured Streaming program - PullRequest
0 votes
/ 04 March 2019

I am trying to run the StructuredNetworkWordCount example program from Structured Streaming, but it fails with an error at startup. The detailed logs are as follows:

INFO StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
WARN TextSocketSourceProvider: The socket source should not be used for production applications! It does not support recovery.
ERROR StreamMetadata: Error writing stream metadata StreamMetadata(be38301d-e098-4040-9b17-e01eea67dace) to /tmp/temporary-5b3821f4-07c5-4830-b664-7fcee2a7a8d9/metadata
java.io.IOException: timeout
    at org.apache.hadoop.fs.DfsOutputStream.nativeClose(Native Method)
    at org.apache.hadoop.fs.DfsOutputStream.close(DfsOutputStream.java:118)
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106)
    at sun.nio.cs.StreamEncoder.implClose(StreamEncoder.java:320)
    at sun.nio.cs.StreamEncoder.close(StreamEncoder.java:149)
    at java.io.OutputStreamWriter.close(OutputStreamWriter.java:233)
    at com.fasterxml.jackson.core.json.WriterBasedJsonGenerator.close(WriterBasedJsonGenerator.java:883)
    at com.fasterxml.jackson.databind.ObjectMapper._configAndWriteValue(ObjectMapper.java:3561)
    at com.fasterxml.jackson.databind.ObjectMapper.writeValue(ObjectMapper.java:2909)
    at org.json4s.jackson.Serialization$.write(Serialization.scala:27)
    at org.apache.spark.sql.execution.streaming.StreamMetadata$.write(StreamMetadata.scala:78)
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anonfun$6.apply(StreamExecution.scala:116)
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anonfun$6.apply(StreamExecution.scala:114)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.sql.execution.streaming.StreamExecution.<init>(StreamExecution.scala:114)
    at org.apache.spark.sql.streaming.StreamingQueryManager.createQuery(StreamingQueryManager.scala:240)
    at org.apache.spark.sql.streaming.StreamingQueryManager.startQuery(StreamingQueryManager.scala:278)
    at org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:282)
    at com.baidu.inf.spark.StructuredNetworkWordCount$.main(StructuredNetworkWordCount.scala:53)
    at com.baidu.inf.spark.StructuredNetworkWordCount.main(StructuredNetworkWordCount.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:775)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:180)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:205)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:119)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Exception in thread "main" java.io.IOException: timeout
    at org.apache.hadoop.fs.DfsOutputStream.nativeClose(Native Method)
    at org.apache.hadoop.fs.DfsOutputStream.close(DfsOutputStream.java:118)
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:72)
    at org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:106)
    at sun.nio.cs.StreamEncoder.implClose(StreamEncoder.java:320)
    at sun.nio.cs.StreamEncoder.close(StreamEncoder.java:149)
    at java.io.OutputStreamWriter.close(OutputStreamWriter.java:233)
    at com.fasterxml.jackson.core.json.WriterBasedJsonGenerator.close(WriterBasedJsonGenerator.java:883)
    at com.fasterxml.jackson.databind.ObjectMapper._configAndWriteValue(ObjectMapper.java:3561)
    at com.fasterxml.jackson.databind.ObjectMapper.writeValue(ObjectMapper.java:2909)
    at org.json4s.jackson.Serialization$.write(Serialization.scala:27)
    at org.apache.spark.sql.execution.streaming.StreamMetadata$.write(StreamMetadata.scala:78)
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anonfun$6.apply(StreamExecution.scala:116)
    at org.apache.spark.sql.execution.streaming.StreamExecution$$anonfun$6.apply(StreamExecution.scala:114)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.sql.execution.streaming.StreamExecution.<init>(StreamExecution.scala:114)
    at org.apache.spark.sql.streaming.StreamingQueryManager.createQuery(StreamingQueryManager.scala:240)
    at org.apache.spark.sql.streaming.StreamingQueryManager.startQuery(StreamingQueryManager.scala:278)
    at org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:282)
    at com.baidu.inf.spark.StructuredNetworkWordCount$.main(StructuredNetworkWordCount.scala:53)
    at com.baidu.inf.spark.StructuredNetworkWordCount.main(StructuredNetworkWordCount.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:775)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:180)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:205)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:119)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
INFO SparkContext: Invoking stop() from shutdown hook
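For reference, here is a minimal sketch of the query being started (line 53 in the trace is the .start() call), assuming it mirrors Spark's standard StructuredNetworkWordCount example; the host, port, and the explicit checkpointLocation shown here are illustrative assumptions and are not taken from the log above:

import org.apache.spark.sql.SparkSession

object StructuredNetworkWordCount {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder
      .appName("StructuredNetworkWordCount")
      .getOrCreate()
    import spark.implicits._

    // Read lines from the socket source (as the WARN notes, not for production use).
    val lines = spark.readStream
      .format("socket")
      .option("host", "localhost") // assumption: host passed on the command line
      .option("port", "9999")      // assumption: port passed on the command line
      .load()

    // Split each line into words and count occurrences.
    val wordCounts = lines.as[String]
      .flatMap(_.split(" "))
      .groupBy("value")
      .count()

    // start() is where StreamMetadata is written; without an explicit
    // checkpointLocation, Spark writes it under a /tmp/temporary-* path
    // resolved against the default filesystem, which is where the
    // java.io.IOException: timeout occurs in the trace above.
    val query = wordCounts.writeStream
      .outputMode("complete")
      .format("console")
      .option("checkpointLocation", "file:///tmp/structured-wordcount-checkpoint") // illustrative
      .start()

    query.awaitTermination()
  }
}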
...