Я пытаюсь распараллелить программу matlab, используя spark (python 3.5), и у меня возникают проблемы с этим.Я не понимаю, если это проблема взаимодействия из-за типов объектов, которые, возможно, не совпадают между Matlab и Python, или если у меня есть другая проблема интерна.
from pyspark import SparkConf, SparkContext
import matlab.engine
import numpy as np
if __name__ == "__main__":
conf = SparkConf().setAppName("reduce").setMaster("local[*]")
sc = SparkContext(conf = conf)
eng = matlab.engine.start_matlab()
x=eng.getListOfData()
myrdd=sc.parallelize(x)
object = myrdd.map(lambda x :np.array((eng.myfunc(x))._data)).collect()
Это то, что я получаю
Undefined function or variable '__getstate__'.
Traceback (most recent call last):
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\cloudpickle.py", line 148, in dump
return Pickler.dump(self, obj)
File "C:\Program Files\Python 3.5\lib\pickle.py", line 408, in dump
self.save(obj)
File "C:\Program Files\Python 3.5\lib\pickle.py", line 475, in save
f(self, obj) # Call unbound method with explicit self
File "C:\Program Files\Python 3.5\lib\pickle.py", line 740, in save_tuple
save(element)
File "C:\Program Files\Python 3.5\lib\pickle.py", line 475, in save
f(self, obj) # Call unbound method with explicit self
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\cloudpickle.py", line 255, in save_function
self.save_function_tuple(obj)
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\cloudpickle.py", line 292, in save_function_tuple
save((code, closure, base_globals))
File "C:\Program Files\Python 3.5\lib\pickle.py", line 475, in save
f(self, obj) # Call unbound method with explicit self
File "C:\Program Files\Python 3.5\lib\pickle.py", line 725, in save_tuple
save(element)
.....
matlab.engine.MatlabExecutionError: Undefined function or variable '__getstate__'.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:.../mySparkProgram.py", line 20, in <module>
EAPa = bandesrdd.map(lambda x : (eng.apa(x))).collect()
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\rdd.py", line 796, in collect
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\rdd.py", line 2442, in _jrdd
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\rdd.py", line 2375, in _wrap_function
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\rdd.py", line 2361, in _prepare_for_python_RDD
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\serializers.py", line 464, in dumps
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\cloudpickle.py", line 704, in dumps
File "C:...\spark-2.2.3-bin-hadoop2.7\python\lib\pyspark.zip\pyspark\cloudpickle.py", line 162, in dump
_pickle.PicklingError: Could not serialize object: MatlabExecutionError: Undefined function or variable '__getstate__'.