ResourceExhaustedErrorTraceback (most recent call last)
<ipython-input-8-cb1025b61acf> in <module>()
----> 1 history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=30,validation_data=validation_generator, validation_steps=50)
/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.pyc in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/usr/local/lib/python2.7/dist-packages/keras/models.pyc in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1251 use_multiprocessing=use_multiprocessing,
1252 shuffle=shuffle,
-> 1253 initial_epoch=initial_epoch)
1254
1255 @interfaces.legacy_generator_methods_support
/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.pyc in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/usr/local/lib/python2.7/dist-packages/keras/engine/training.pyc in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
2242 outs = self.train_on_batch(x, y,
2243 sample_weight=sample_weight,
-> 2244 class_weight=class_weight)
2245
2246 if not isinstance(outs, list):
/usr/local/lib/python2.7/dist-packages/keras/engine/training.pyc in train_on_batch(self, x, y, sample_weight, class_weight)
1888 ins = x + y + sample_weights
1889 self._make_train_function()
-> 1890 outputs = self.train_function(ins)
1891 if len(outputs) == 1:
1892 return outputs[0]
/usr/local/lib/python2.7/dist-packages/keras/backend/tensorflow_backend.pyc in __call__(self, inputs)
2473 session = get_session()
2474 updated = session.run(fetches=fetches, feed_dict=feed_dict,
-> 2475 **self.session_kwargs)
2476 return updated[:len(self.outputs)]
2477
/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc in run(self, fetches, feed_dict, options, run_metadata)
893 try:
894 result = self._run(None, fetches, feed_dict, options_ptr,
--> 895 run_metadata_ptr)
896 if run_metadata:
897 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc in _run(self, handle, fetches, feed_dict, options, run_metadata)
1126 if final_fetches or final_targets or (handle and feed_dict_tensor):
1127 results = self._do_run(handle, final_targets, final_fetches,
-> 1128 feed_dict_tensor, options, run_metadata)
1129 else:
1130 results = []
/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1342 if handle is None:
1343 return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1344 options, run_metadata)
1345 else:
1346 return self._do_call(_prun_fn, self._session, handle, feeds, fetches)
/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc in _do_call(self, fn, *args)
1361 except KeyError:
1362 pass
-> 1363 raise type(e)(node_def, op, message)
1364
1365 def _extend_graph(self):
ResourceExhaustedError: OOM when allocating tensor with shape[6272,512] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[Node: training/RMSprop/mul_24 = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](RMSprop/rho/read, training/RMSprop/Variable_8/read)]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[[Node: metrics/acc/Mean_1/_109 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_774_metrics/acc/Mean_1", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
Caused by op u'training/RMSprop/mul_24', defined at:
File "/usr/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/usr/local/lib/python2.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python2.7/dist-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelapp.py", line 486, in start
self.io_loop.start()
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "/usr/local/lib/python2.7/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 450, in _handle_events
self._handle_recv()
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 480, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 432, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tornado/stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-cb1025b61acf>", line 1, in <module>
history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=30,validation_data=validation_generator, validation_steps=50)
File "/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/models.py", line 1253, in fit_generator
initial_epoch=initial_epoch)
File "/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 2088, in fit_generator
self._make_train_function()
File "/usr/local/lib/python2.7/dist-packages/keras/engine/training.py", line 990, in _make_train_function
loss=self.total_loss)
File "/usr/local/lib/python2.7/dist-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/keras/optimizers.py", line 251, in get_updates
new_a = self.rho * a + (1. - self.rho) * K.square(g)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variables.py", line 775, in _run_op
return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 907, in binary_op_wrapper
return func(x, y, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py", line 1131, in _mul_dispatch
return gen_math_ops._mul(x, y, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_math_ops.py", line 2798, in _mul
"Mul", x=x, y=y, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 3160, in create_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1625, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
ResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[6272,512] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[Node: training/RMSprop/mul_24 = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:GPU:0"](RMSprop/rho/read, training/RMSprop/Variable_8/read)]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[[Node: metrics/acc/Mean_1/_109 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_774_metrics/acc/Mean_1", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
I am trying to set up Keras with Docker for deep learning on the GPU. I am looking at the Keras GitHub repo (https://github.com/keras-team/keras/tree/master/docker) and cannot figure out how to do it. I am new to Docker, and every Docker image I have managed to get so far came from a 'docker pull' command, but I don't see a docker pull command for getting Keras. I also don't understand the 'make' instructions that are provided.
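In case I am simply misreading the README, my understanding of the make-based workflow is roughly the following; the exact target names are my guess from the Makefile, so this may well be wrong:

git clone https://github.com/keras-team/keras.git
cd keras/docker
make notebook GPU=0   # supposedly builds the image and starts a Jupyter notebook using GPU 0 (target name assumed)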
I have had a hell of a time trying to get Keras running on my Linux machine. At first I tried installing CUDA, TF and everything else directly on the machine, but I ran into so many version-compatibility problems across the software stack that I gave up and tried to simplify things with Docker, which has not been easy either. I have tried several Docker images, including ermaker/keras-jupyter and gw000/keras-full, and could not get them to work.
Using gw000/keras-full, I tried to run the simple cat-classifier network from the book Deep Learning with Keras, and I get an error saying the GPU memory is completely exhausted (OOM). I don't know why I am getting this error. It is a simple classifier that ran fine on my old laptop, and for some reason it blows up on my RTX 2080 Ti.
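In case it is relevant: from searching around, my understanding is that TensorFlow reserves all GPU memory up front by default and can be told to allocate it on demand instead. This is only a sketch of what I think that looks like with the TF 1.x / Keras 2 versions inside the image; I have not confirmed it fixes anything:

import tensorflow as tf
from keras import backend as K

# Ask TensorFlow to grow GPU memory usage as needed instead of grabbing it all at startup
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))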
Any help getting a working Keras setup through Docker would be greatly appreciated.
This is how I am using gw000/keras-full. I run this command to start the Docker container with GPU access:
docker run -d $(ls /dev/nvidia* | xargs -I{} echo '--device={}') $(ls /usr/lib/*-linux-gnu/{libcuda,libnvidia}* | xargs -I{} echo '-v {}:{}:ro') -p 8888:8888 -v /home/name/Desktop:/srv gw000/keras-full
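For completeness, I assume that with the nvidia-docker2 runtime installed the same container could be started without manually mounting the devices and driver libraries, roughly like this, although I have not verified it on my machine:

docker run -d --runtime=nvidia -p 8888:8888 -v /home/name/Desktop:/srv gw000/keras-full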
When I try to train the model, this happens in the first epoch. I can see in the error that it is running Python 2, which may be a problem because the code was probably written for Python 3, but I don't know whether that is actually the issue or how to make it use Python 3 instead. As mentioned before, the code is taken verbatim from the book "Deep Learning with Keras" and worked perfectly on my old laptop. I cannot figure out why nothing works on this machine.
Epoch 1/30
*SEE THE TRACEBACK AT THE TOP OF THIS POST FOR THE ERROR I GET*
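Finally, in case the batch size is what is blowing up the memory: I believe the generators in my notebook are the ImageDataGenerator.flow_from_directory ones from the book (I am assuming that detail matters; the path below is a placeholder, not my exact code). My understanding is that lowering batch_size there reduces the GPU memory needed per training step:

from keras.preprocessing.image import ImageDataGenerator

# Placeholder directory; the real one points at the cats-vs-dogs data from the book
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    '/srv/cats_and_dogs_small/train',  # hypothetical path inside the container
    target_size=(150, 150),
    batch_size=10,                     # smaller than the book's 20, to use less GPU memory per step
    class_mode='binary')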