В некоторых случаях кажется, что кластер dask зависает при перезапуске.
Чтобы воспроизвести это, я написал следующий код:
import contextlib2
from distributed import Client, LocalCluster
for i in xrange(100):
print i
with contextlib2.ExitStack() as es:
cluster = LocalCluster(processes=True, n_workers=4)
client = Client(cluster)
es.callback(client.close)
es.callback(es.callback(client.close))
Этот код никогда не завершает цикл; я получаю такую ошибку:
raise_exc_info(self._exc_info)
File "//anaconda/lib/python2.7/site-packages/tornado/gen.py", line 1141, in run
yielded = self.gen.throw(*exc_info)
File "//anaconda/lib/python2.7/site-packages/distributed/deploy/local.py", line 191, in _start
yield [self._start_worker(**self.worker_kwargs) for i in range(n_workers)]
File "//anaconda/lib/python2.7/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "//anaconda/lib/python2.7/site-packages/tornado/concurrent.py", line 269, in result
raise_exc_info(self._exc_info)
File "//anaconda/lib/python2.7/site-packages/tornado/gen.py", line 883, in callback
result_list.append(f.result())
File "//anaconda/lib/python2.7/site-packages/tornado/concurrent.py", line 269, in result
raise_exc_info(self._exc_info)
File "//anaconda/lib/python2.7/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "//anaconda/lib/python2.7/site-packages/distributed/deploy/local.py", line 217, in _start_worker
raise gen.TimeoutError("Worker failed to start")
Я использую dask.distributed 1.25.1 и Python 2.7 на Mac.