I have a problem with Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe
0 votes
/ January 10, 2020

I am running pyspark in a Jupyter notebook on a MacPro:

import findspark
findspark.init()
from pyspark import SparkContext
sc = SparkContext(master="local[4]")
import numpy as np
A = sc.parallelize([(1, 3), (3, 100), (1, -5), (3, 2)])
A.collect()
B = A.groupByKey().map(lambda k, t: (k, [x for x in t]))

The code above ran fine, but when I call

B.collect()

I get the error shown below:

---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-21-bd83718c124b> in <module>
----> 1 B.collect()

~/spark-3.0.0-preview2-bin-hadoop2.7/python/pyspark/rdd.py in collect(self)
    887         """
    888         with SCCallSiteSync(self.context) as css:
--> 889             sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
    890         return list(_load_from_socket(sock_info, self._jrdd_deserializer))
    891 

~/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/py4j-0.10.8.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1284         answer = self.gateway_client.send_command(command)
   1285         return_value = get_return_value(
-> 1286             answer, self.gateway_client, self.target_id, self.name)
   1287 
   1288         for temp_arg in temp_args:

~/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/py4j-0.10.8.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    326                 raise Py4JJavaError(
    327                     "An error occurred while calling {0}{1}{2}.\n".
--> 328                     format(target_id, ".", name), value)
    329             else:
    330                 raise Py4JError(

Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 3 in stage 14.0 failed 1 times, most recent failure: Lost task 3.0 in stage 14.0 (TID 41, 172.26.10.195, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 597, in main
    process()
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 589, in process
    serializer.dump_stream(out_iter, outfile)
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 513, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/util.py", line 107, in wrapper
    return f(*args, **kwargs)
TypeError: <lambda>() missing 1 required positional argument: 't'

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:484)
    at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:619)
    at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:602)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:437)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator.foreach(Iterator.scala:941)
    at scala.collection.Iterator.foreach$(Iterator.scala:941)
    at org.apache.spark.InterruptibleIterator.foreach(InterruptibleIterator.scala:28)
    at scala.collection.generic.Growable.$plus$plus$eq(Growable.scala:62)
    at scala.collection.generic.Growable.$plus$plus$eq$(Growable.scala:53)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:105)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:49)
    at scala.collection.TraversableOnce.to(TraversableOnce.scala:315)
    at scala.collection.TraversableOnce.to$(TraversableOnce.scala:313)
    at org.apache.spark.InterruptibleIterator.to(InterruptibleIterator.scala:28)
    at scala.collection.TraversableOnce.toBuffer(TraversableOnce.scala:307)
    at scala.collection.TraversableOnce.toBuffer$(TraversableOnce.scala:307)
    at org.apache.spark.InterruptibleIterator.toBuffer(InterruptibleIterator.scala:28)
    at scala.collection.TraversableOnce.toArray(TraversableOnce.scala:294)
    at scala.collection.TraversableOnce.toArray$(TraversableOnce.scala:288)
    at org.apache.spark.InterruptibleIterator.toArray(InterruptibleIterator.scala:28)
    at org.apache.spark.rdd.RDD.$anonfun$collect$2(RDD.scala:1004)
    at org.apache.spark.rdd.RDD$$Lambda$1137/545621327.apply(Unknown Source)
    at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
    at org.apache.spark.SparkContext$$Lambda$1142/868982109.apply(Unknown Source)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:127)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
    at org.apache.spark.executor.Executor$TaskRunner$$Lambda$1044/973987088.apply(Unknown Source)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1989)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1977)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1976)
    at org.apache.spark.scheduler.DAGScheduler$$Lambda$1724/174043468.apply(Unknown Source)
    at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
    at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1976)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:956)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:956)
    at org.apache.spark.scheduler.DAGScheduler$$Lambda$1720/96947809.apply(Unknown Source)
    at scala.Option.foreach(Option.scala:407)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:956)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2206)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2155)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2144)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:758)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2116)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2137)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2156)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2181)
    at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1004)
    at org.apache.spark.rdd.RDD$$Lambda$750/156943736.apply(Unknown Source)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:1003)
    at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:168)
    at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:483)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:282)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:238)
    at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 597, in main
    process()
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 589, in process
    serializer.dump_stream(out_iter, outfile)
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 513, in dump_stream
    vs = list(itertools.islice(iterator, batch))
  File "/Users/exblabasia/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/util.py", line 107, in wrapper
    return f(*args, **kwargs)
TypeError: <lambda>() missing 1 required positional argument: 't'

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:484)
    at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:619)
    at org.apache.spark.api.python.PythonRunner$$anon$3.read(PythonRunner.scala:602)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:437)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator.foreach(Iterator.scala:941)
    at scala.collection.Iterator.foreach$(Iterator.scala:941)
    at org.apache.spark.InterruptibleIterator.foreach(InterruptibleIterator.scala:28)
    at scala.collection.generic.Growable.$plus$plus$eq(Growable.scala:62)
    at scala.collection.generic.Growable.$plus$plus$eq$(Growable.scala:53)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:105)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:49)
    at scala.collection.TraversableOnce.to(TraversableOnce.scala:315)
    at scala.collection.TraversableOnce.to$(TraversableOnce.scala:313)
    at org.apache.spark.InterruptibleIterator.to(InterruptibleIterator.scala:28)
    at scala.collection.TraversableOnce.toBuffer(TraversableOnce.scala:307)
    at scala.collection.TraversableOnce.toBuffer$(TraversableOnce.scala:307)
    at org.apache.spark.InterruptibleIterator.toBuffer(InterruptibleIterator.scala:28)
    at scala.collection.TraversableOnce.toArray(TraversableOnce.scala:294)
    at scala.collection.TraversableOnce.toArray$(TraversableOnce.scala:288)
    at org.apache.spark.InterruptibleIterator.toArray(InterruptibleIterator.scala:28)
    at org.apache.spark.rdd.RDD.$anonfun$collect$2(RDD.scala:1004)
    at org.apache.spark.rdd.RDD$$Lambda$1137/545621327.apply(Unknown Source)
    at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
    at org.apache.spark.SparkContext$$Lambda$1142/868982109.apply(Unknown Source)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:127)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
    at org.apache.spark.executor.Executor$TaskRunner$$Lambda$1044/973987088.apply(Unknown Source)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    ... 1 more

Answers [ 2 ]

0 votes
/ January 10, 2020

The error is very clear: TypeError: <lambda>() missing 1 required positional argument: 't'. Your lambda declares two parameters, but map passes it a single argument: the (key, values) tuple. (Python 3 removed tuple parameter unpacking in lambdas, see PEP 3113, so lambda k, t cannot receive a tuple as two arguments.)

Here is the fix (the parameter is named kv here to avoid shadowing the built-in tuple):

B = A.groupByKey().map(lambda kv: (kv[0], [x for x in kv[1]]))
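
A minimal sketch of an equivalent fix uses mapValues, which applies a function to the value side of each pair while leaving the keys untouched. The sample RDD below is the one from the question; the printed output in the comment is illustrative, and ordering may differ from run to run:

import findspark
findspark.init()
from pyspark import SparkContext

sc = SparkContext(master="local[4]")
A = sc.parallelize([(1, 3), (3, 100), (1, -5), (3, 2)])

# mapValues transforms only the values, so the grouped iterable
# produced by groupByKey can be turned into a plain list directly.
B = A.groupByKey().mapValues(list)
print(B.collect())  # e.g. [(1, [3, -5]), (3, [100, 2])]

Because mapValues is guaranteed not to change the keys, it also preserves the RDD's partitioner, which is why it is generally preferred over map for key-preserving transformations.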
0 votes
/ January 10, 2020

I don't know the volume of data you are trying to collect, but collect() essentially pulls the entire dataset into the driver's RAM, so avoid collect() when you don't need it. For a DataFrame you can replace it with show(); for an RDD like yours, use take(n) to inspect just a few records, as sketched below.
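
A minimal sketch of that advice, assuming B is the fixed RDD from the question (take and count are standard RDD actions; show only exists on DataFrames):

# Instead of materializing every record on the driver with collect(),
# fetch only a handful of elements for inspection:
print(B.take(2))   # at most 2 (key, values) pairs

# Or compute an aggregate that never ships the full data to the driver:
print(B.count())   # number of groups, i.e. distinct keys after groupByKey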
