Используйте стратегии tf.distribute с подклассами модели tf.keras - PullRequest
0 голосов
/ 28 февраля 2020

У меня в настоящее время есть подкласс модели tf.keras, но я не могу использовать стратегии распределения на GPU: хотя на веб-сайте TensorFlow указано, что это возможно, я получаю ошибку, говорящую об обратном.

Одно найденное мной решение — обернуть модель в tf.keras.models.Model, но это привело к ValueError: We currently do not support distribution strategy with a `Sequential` model that is created without `input_shape`/`input_dim` set in its first layer or a subclassed model. Это решение мне не подходит, потому что моя форма ввода — (None, None): вход представляет собой набор последовательностей, которые не имеют одинаковой формы, и я не привожу их к одной и той же форме.

Есть ли способ обойти это или использовать tf.distribute с подклассом Model?

nlupy_1     |   File "/app/src/main/python/mosaix/serve/api/__init__.py", line 201, in main
nlupy_1     |     init_state()
nlupy_1     |   File "/app/src/main/python/mosaix/serve/api/__init__.py", line 135, in init_state
nlupy_1     |     network_dict[key] = ParsingPipeline(pipeline_params, 'predict', path)
nlupy_1     |   File "/app/src/main/python/mosaix/learn/pipelines/pipelines.py", line 130, in __init__
nlupy_1     |     self.build()
nlupy_1     |   File "/app/src/main/python/mosaix/learn/pipelines/pipelines.py", line 173, in build
nlupy_1     |     self._load_model()
nlupy_1     |   File "/app/src/main/python/mosaix/learn/pipelines/base_pipeline.py", line 38, in wrapped
nlupy_1     |     return func(*args, **kwargs)
nlupy_1     |   File "/app/src/main/python/mosaix/learn/pipelines/base_pipeline.py", line 169, in _load_model
nlupy_1     |     self.model_inference(warm_up_query, warm_up_annotated)
nlupy_1     |   File "/app/src/main/python/mosaix/learn/pipelines/pipelines.py", line 186, in model_inference
nlupy_1     |     return self._model_inference((raw, annotated))
nlupy_1     |   File "/app/src/main/python/mosaix/learn/pipelines/pipelines.py", line 198, in _model_inference
nlupy_1     |     bio_logits, intent_logits = self.model.predict(inputs)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training.py", line 909, in predict
nlupy_1     |     use_multiprocessing=use_multiprocessing)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_distributed.py", line 760, in predict
nlupy_1     |     callbacks=callbacks)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_arrays.py", line 189, in model_iteration
nlupy_1     |     f = _make_execution_function(model, mode)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_arrays.py", line 564, in _make_execution_function
nlupy_1     |     return distributed_training_utils._make_execution_function(model, mode)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/distribute/distributed_training_utils.py", line 842, in _make_execution_function
nlupy_1     |     return _make_execution_function_with_cloning(model, mode)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/distribute/distributed_training_utils.py", line 935, in _make_execution_function_with_cloning
nlupy_1     |     _make_replicated_models_with_cloning(model, mode)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/distribute/distributed_training_utils.py", line 915, in _make_replicated_models_with_cloning
nlupy_1     |     _build_distributed_network(model, strategy, mode)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/distribute/distributed_training_utils.py", line 783, in _build_distributed_network
nlupy_1     |     args=(model, mode, inputs, targets))
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/distribute/distribute_lib.py", line 1787, in call_for_each_replica
nlupy_1     |     return self._call_for_each_replica(fn, args, kwargs)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/distribute/parameter_server_strategy.py", line 442, in _call_for_each_replica
nlupy_1     |     self._container_strategy(), self._device_map, fn, args, kwargs)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 196, in _call_for_each_replica
nlupy_1     |     coord.join(threads)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/training/coordinator.py", line 389, in join
nlupy_1     |     six.reraise(*self._exc_info_to_raise)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/six.py", line 693, in reraise
nlupy_1     |     raise value
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/training/coordinator.py", line 297, in stop_on_exception
nlupy_1     |     yield
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/distribute/mirrored_strategy.py", line 879, in run
nlupy_1     |     self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/distribute/distributed_training_utils.py", line 743, in _build_network_on_replica
nlupy_1     |     model, input_tensors=inputs, layer_fn=models.share_weights)
nlupy_1     |   File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/models.py", line 165, in _clone_functional_model
nlupy_1     |     raise ValueError('Expected `model` argument '
nlupy_1     | ValueError: Expected `model` argument to be a functional `Model` instance, but got a subclass model instead.
nlupy_1     | 
Добро пожаловать на сайт PullRequest, где вы можете задавать вопросы и получать ответы от других членов сообщества.
...