Hi, I'm new to PySpark (I only started learning it a week ago) and I'm hoping someone can help with this error:

ImportError: No module named numpy

Could some kind soul help me figure out why numpy cannot be found? I have already tried the following:

- uninstalling numpy and reinstalling it from the Anaconda prompt run as administrator
- checking my python_home environment variable
- restarting my Jupyter notebook kernel

Here is the code I am running:
from pyspark.mllib.regression import LabeledPoint

def parse_line(l):
    # Split one CSV line into its fields; report any line that fails.
    try:
        return l.split(",")
    except Exception:
        print("error in processing {0}".format(l))

# Column 0 of YearPredictionMSD is the label (year); the rest are features.
data = sc.textFile('YearPredictionMSD.txt').map(parse_line).toDF()
data_label = data.rdd.map(lambda x: LabeledPoint(x[0], x[1:]))

# Standard split for this dataset: first 463,715 rows train, remainder test.
data_train = data_label.zipWithIndex().filter(lambda x: x[1] < 463715)
data_test = data_label.zipWithIndex().filter(lambda x: x[1] >= 463715)
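What confuses me is that numpy imports fine in the notebook itself, so my guess is that the Python worker Spark launches is a different interpreter. A minimal check like the one below (it assumes sc is the active SparkContext) should show whether the driver and the workers are running different Pythons:

import os
import sys

def worker_python(_):
    # Runs on an executor, so this reports the worker's interpreter.
    import sys
    return sys.executable

# Compare the driver's interpreter with the one the executors launch.
print("driver python :", sys.executable)
print("worker python :", sc.parallelize([0], 1).map(worker_python).collect())
print("PYSPARK_PYTHON:", os.environ.get("PYSPARK_PYTHON"))

Running the train/test split above produces this traceback: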
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<ipython-input-4-ed224fb17ae0> in <module>
----> 1 data_train = data_label.zipWithIndex().filter(lambda x: x[1] < 463715)
2
3 data_test = data_label.zipWithIndex().filter(lambda x: x[1] >= 463715)
C:\spark-3.0.0-preview2-bin-hadoop2.7\python\pyspark\rdd.py in zipWithIndex(self)
2244 starts = [0]
2245 if self.getNumPartitions() > 1:
-> 2246 nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
2247 for i in range(len(nums) - 1):
2248 starts.append(starts[-1] + nums[i])
C:\spark-3.0.0-preview2-bin-hadoop2.7\python\pyspark\rdd.py in collect(self)
887 """
888 with SCCallSiteSync(self.context) as css:
--> 889 sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
890 return list(_load_from_socket(sock_info, self._jrdd_deserializer))
891
C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\py4j-0.10.8.1-src.zip\py4j\java_gateway.py in __call__(self, *args)
1284 answer = self.gateway_client.send_command(command)
1285 return_value = get_return_value(
-> 1286 answer, self.gateway_client, self.target_id, self.name)
1287
1288 for temp_arg in temp_args:
C:\spark-3.0.0-preview2-bin-hadoop2.7\python\pyspark\sql\utils.py in deco(*a, **kw)
96 def deco(*a, **kw):
97 try:
---> 98 return f(*a, **kw)
99 except py4j.protocol.Py4JJavaError as e:
100 converted = convert_exception(e.java_exception)
C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\py4j-0.10.8.1-src.zip\py4j\protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 1.0 failed 1 times, most recent failure: Lost task 2.0 in stage 1.0 (TID 3, DESKTOP-MRGDUK2, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\worker.py", line 579, in main
File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\worker.py", line 71, in read_command
File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\serializers.py", line 172, in _read_with_length
return self.loads(obj)
File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\serializers.py", line 700, in loads
return pickle.loads(obj, encoding=encoding)
File "C:\spark-3.0.0-preview2-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\mllib\__init__.py", line 28, in <module>
import numpy
ModuleNotFoundError: No module named 'numpy'
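From what I have read, one possible fix is to point the workers at the Anaconda interpreter that actually has numpy, before the SparkContext is created. Something along these lines (the path is a placeholder for my own install, and I have not confirmed this is the right approach):

import os

# Placeholder path to the Anaconda python.exe that has numpy installed;
# both variables must be set before the SparkContext is created.
os.environ["PYSPARK_PYTHON"] = r"C:\Users\me\anaconda3\python.exe"
os.environ["PYSPARK_DRIVER_PYTHON"] = r"C:\Users\me\anaconda3\python.exe"

Is that the right direction, or is something else going on here?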