Задание Pyspark прервано из-за ошибки этапа .: реализация kmeans через map reduce - PullRequest
1 голос
/ 26 мая 2020

В рамках моего курса по распределенным базам данных у нас есть задача реализовать алгоритм kmeans с использованием функций уменьшения карты, однако я не могу получить доступ или отобразить результаты моего кода уменьшения карты. Вот моя реализация:

def Find_dist(x,y):
  sum = 0
  vec1= list(x[0])
  vec2 = list(y)
  for i in range(len(vec1)):
    sum = sum +(vec1[i]-vec2[i])*(vec1[i]-vec2[i])
  return sum

def mapper(centers, datapoint):
  min = Find_dist(datapoint,cent[0])
  closest = cent[0]
  for i in range(1,len(cent)):
    curr = Find_dist(datapoint,cent[i])
    if curr < min:
      min = curr
      closest = cent[i]
  yield (closest,datapoint)

def combiner(Key,Values):
  sum = [0]*len(Key)
  counter = 0
  for datapoint in Values:
    vec = list(datapoint[0])
    counter = counter+1
    sum = sum+vec
  point = Row(vec)
  result = (counter,point)
  yield (Key, result)

def Reducer(Key,Values):
  sum = [0]*len(Key)
  total_counter = 0
  for value in Values:
    counter = value[0]
    data = list(value[1][0])
    sum = sum+data
    total_counter = total_counter+counter
  avg = [0]*len(Key)
  for i in range(len(Key)):
    avg[i] = sum[i]/total_counted
  centroid = Row(avg)
  yield (Key, centroid)

def kmeans_fit(data,k,max_iter):
  centers = data.rdd.takeSample(False,k,seed=42)
  for i in range(max_iter):
    mapped = data.rdd.map(lambda x: mapper(centers,x))
    combined = mapped.combineByKey(map, combiner,combiner)
    reduced = combined.reduceByKey(lambda x: Reducer(x)).collect()
    flag = True
    for i in range(k):
      if(reduced[i][1] != reduced[i][0] ):
        for j in range(k):
          centers[i] = reduced[i][1]
        flag = False
        break
    if (flag):
      break
  return centers
data = spark.read.parquet("/mnt/ddscoursedatabricksstg/ddscoursedatabricksdata/random_data.parquet")
kmeans_fit(data,5,10)

Однако я получаю эту ошибку, когда пытаюсь отобразить данные на каждой из этапов объединения или сокращения карты:

org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 1077.0 failed 4 times, most recent failure: Lost task 1.3 in stage 1077.0 (TID 29254, 10.139.64.7, executor 20): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
<command-2268222378870327> in <module>
     59   return centers
     60 data = spark.read.parquet("/mnt/ddscoursedatabricksstg/ddscoursedatabricksdata/random_data.parquet")
---> 61 kmeans_fit(data,5,10)

<command-2268222378870327> in kmeans_fit(data, k, max_iter)
     47     mapped = data.rdd.map(lambda x: mapper(centers,x))
     48     combined = mapped.combineByKey(map, combiner,combiner)
---> 49     reduced = combined.reduceByKey(lambda x: Reducer(x)).collect()
     50     flag = True
     51     for i in range(k):

/databricks/spark/python/pyspark/rdd.py in collect(self)
    829         # Default path used in OSS Spark / for non-credential passthrough clusters:
    830         with SCCallSiteSync(self.context) as css:
--> 831             sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
    832         return list(_load_from_socket(sock_info, self._jrdd_deserializer))
    833 

/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1255         answer = self.gateway_client.send_command(command)
   1256         return_value = get_return_value(
-> 1257             answer, self.gateway_client, self.target_id, self.name)
   1258 
   1259         for temp_arg in temp_args:

/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
     61     def deco(*a, **kw):
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:
     65             s = e.java_exception.toString()

/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    326                 raise Py4JJavaError(
    327                     "An error occurred while calling {0}{1}{2}.\n".
--> 328                     format(target_id, ".", name), value)
    329             else:
    330                 raise Py4JError(

Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 1077.0 failed 4 times, most recent failure: Lost task 1.3 in stage 1077.0 (TID 29254, 10.139.64.7, executor 20): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/databricks/spark/python/pyspark/worker.py", line 480, in main
    process()
  File "/databricks/spark/python/pyspark/worker.py", line 470, in process
    out_iter = func(split_index, iterator)
  File "/databricks/spark/python/pyspark/rdd.py", line 2542, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "/databricks/spark/python/pyspark/rdd.py", line 2542, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "/databricks/spark/python/pyspark/rdd.py", line 353, in func
    return f(iterator)
  File "/databricks/spark/python/pyspark/rdd.py", line 1904, in combineLocally
    merger.mergeValues(iterator)
  File "/databricks/spark/python/pyspark/shuffle.py", line 238, in mergeValues
    for k, v in iterator:
  File "<command-2268222378870327>", line 10, in mapper
  File "<command-2268222378870327>", line 3, in Find_dist
TypeError: 'float' object is not iterable

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:540)
    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:676)
    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:659)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:494)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
    at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.doRunTask(Task.scala:140)
    at org.apache.spark.scheduler.Task.run(Task.scala:113)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:537)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1541)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:543)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:2362)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2350)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2349)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2349)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1102)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1102)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1102)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2582)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2529)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2517)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:897)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2280)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2302)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2321)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2346)
    at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:997)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:392)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:996)
    at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:247)
    at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
    at sun.reflect.GeneratedMethodAccessor220.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
    at py4j.Gateway.invoke(Gateway.java:295)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:251)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/databricks/spark/python/pyspark/worker.py", line 480, in main
    process()
  File "/databricks/spark/python/pyspark/worker.py", line 470, in process
    out_iter = func(split_index, iterator)
  File "/databricks/spark/python/pyspark/rdd.py", line 2542, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "/databricks/spark/python/pyspark/rdd.py", line 2542, in pipeline_func
    return func(split, prev_func(split, iterator))
  File "/databricks/spark/python/pyspark/rdd.py", line 353, in func
    return f(iterator)
  File "/databricks/spark/python/pyspark/rdd.py", line 1904, in combineLocally
    merger.mergeValues(iterator)
  File "/databricks/spark/python/pyspark/shuffle.py", line 238, in mergeValues
    for k, v in iterator:
  File "<command-2268222378870327>", line 10, in mapper
  File "<command-2268222378870327>", line 3, in Find_dist
TypeError: 'float' object is not iterable

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:540)
    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:676)
    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:659)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:494)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
    at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:125)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
    at org.apache.spark.scheduler.Task.doRunTask(Task.scala:140)
    at org.apache.spark.scheduler.Task.run(Task.scala:113)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$13.apply(Executor.scala:537)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1541)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:543)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)

Заранее благодарим за всем желающим помочь.

...