Error when fitting RDD data into a decision tree classifier
0 votes
/ February 13, 2020
# load dataset
from pyspark.mllib.regression import LabeledPoint

df = spark.sql("select * from ws_var_dataset2")

def labelData(data):
    # label: last column of the row, features: all preceding columns
    return data.map(lambda row: LabeledPoint(row[-1], row[:-1]))

training_data, testing_data = labelData(df.rdd).randomSplit([0.8, 0.2], seed=12345)

I get an error when running the following cell:

from pyspark.mllib.tree import DecisionTree

model = DecisionTree.trainClassifier(training_data, numClasses=2, maxDepth=2, categoricalFeaturesInfo={}, impurity='gini', maxBins=32)

Error: Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.runJob.

Any ideas why this error occurs?

The full error stack trace is included below for reference:

---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-66-43c2f1c0de4c> in <module>
----> 1 model = DecisionTree.trainClassifier(label_df, numClasses=2, maxDepth=2, categoricalFeaturesInfo={}, impurity='gini', maxBins=32)

/usr/local/spark/python/pyspark/mllib/tree.py in trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
    214         """
    215         return cls._train(data, "classification", numClasses, categoricalFeaturesInfo,
--> 216                           impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
    217 
    218     @classmethod

/usr/local/spark/python/pyspark/mllib/tree.py in _train(cls, data, type, numClasses, features, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
    138     def _train(cls, data, type, numClasses, features, impurity="gini", maxDepth=5, maxBins=32,
    139                minInstancesPerNode=1, minInfoGain=0.0):
--> 140         first = data.first()
    141         assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
    142         model = callMLlibFunc("trainDecisionTreeModel", data, type, numClasses, features,

/usr/local/spark/python/pyspark/rdd.py in first(self)
   1358         ValueError: RDD is empty
   1359         """
-> 1360         rs = self.take(1)
   1361         if rs:
   1362             return rs[0]

/usr/local/spark/python/pyspark/rdd.py in take(self, num)
   1340 
   1341             p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
-> 1342             res = self.context.runJob(self, takeUpToNumLeft, p)
   1343 
   1344             items += res

/usr/local/spark/python/pyspark/context.py in runJob(self, rdd, partitionFunc, partitions, allowLocal)
    966         # SparkContext#runJob.
    967         mappedRDD = rdd.mapPartitions(partitionFunc)
--> 968         port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
    969         return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))
    970 

/usr/local/spark/python/lib/py4j-0.10.4-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1131         answer = self.gateway_client.send_command(command)
   1132         return_value = get_return_value(
-> 1133             answer, self.gateway_client, self.target_id, self.name)
   1134 
   1135         for temp_arg in temp_args:

/usr/local/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
     61     def deco(*a, **kw):
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:
     65             s = e.java_exception.toString()

/usr/local/spark/python/lib/py4j-0.10.4-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    317                 raise Py4JJavaError(
    318                     "An error occurred while calling {0}{1}{2}.\n".
--> 319                     format(target_id, ".", name), value)
    320             else:
    321                 raise Py4JError(

Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.runJob.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 62.0 failed 4 times, most recent failure: Lost task 0.3 in stage 62.0 (TID 323, wlbimsprkprd01.opentext.net, executor 2): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/worker.py", line 160, in main
    func, profiler, deserializer, serializer = read_command(pickleSer, infile)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/worker.py", line 54, in read_command
    command = serializer._read_with_length(file)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/serializers.py", line 169, in _read_with_length
    return self.loads(obj)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/serializers.py", line 451, in loads
    return pickle.loads(obj, encoding=encoding)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/mllib/__init__.py", line 28, in <module>
    import numpy
ModuleNotFoundError: No module named 'numpy'

    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:193)
    at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:234)
    at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:152)
    at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:63)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:99)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:322)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1435)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1423)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1422)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1422)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:802)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1650)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1605)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1594)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:628)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1925)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1938)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1951)
    at org.apache.spark.api.python.PythonRDD$.runJob(PythonRDD.scala:441)
    at org.apache.spark.api.python.PythonRDD.runJob(PythonRDD.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
    at java.lang.reflect.Method.invoke(Unknown Source)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:280)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:214)
    at java.lang.Thread.run(Unknown Source)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/worker.py", line 160, in main
    func, profiler, deserializer, serializer = read_command(pickleSer, infile)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/worker.py", line 54, in read_command
    command = serializer._read_with_length(file)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/serializers.py", line 169, in _read_with_length
    return self.loads(obj)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/serializers.py", line 451, in loads
    return pickle.loads(obj, encoding=encoding)
  File "/usr/hdp/current/spark2-client/python/lib/pyspark.zip/pyspark/mllib/__init__.py", line 28, in <module>
    import numpy
ModuleNotFoundError: No module named 'numpy'

    at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:193)
    at org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:234)
    at org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:152)
    at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:63)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:99)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:322)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

The data is loaded via the sql function and converted to an RDD so that MLlib's decision tree classifier, which operates on RDDs, can be used, but for some reason the classifier call fails. Any comments or suggestions are welcome.

1 Answer

0 votes
/ February 14, 2020

The cause is stated in the error stack trace:

ModuleNotFoundError: No module named 'numpy'

You just need to install numpy:

pip install numpy
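
Note that in your traceback the import fails inside a Spark worker process (executor 2 on wlbimsprkprd01.opentext.net), so numpy has to be available to the executor Python on every worker node, not only on the driver. A minimal sketch to check this from the same notebook (it reuses the spark session from your question; the helper name is mine):

def numpy_available(_):
    # this function runs inside the executor Python processes
    try:
        import numpy  # noqa: F401
        yield True
    except ImportError:
        yield False

# one boolean per partition, evaluated on the executors
print(spark.sparkContext.parallelize(range(4), 4).mapPartitions(numpy_available).collect())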

Generally speaking, Spark does not do a great job of surfacing the actual cause of an exception back on the driver. You need to go further down the error stack trace to find it.
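
If the worker nodes have several Python installations, another option is to point Spark at an interpreter that already has numpy. The interpreter path below is hypothetical, and on a YARN cluster this setting typically has to be in place before the application starts (for example in spark-defaults.conf or via the PYSPARK_PYTHON environment variable), so treat this as a sketch rather than a drop-in fix:

from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .appName("decision-tree-example")
         # hypothetical path to a Python that has numpy installed
         .config("spark.pyspark.python", "/opt/anaconda3/bin/python")
         .getOrCreate())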

...