I am trying to use TensorFlow Lite to quantize a network that will be implemented on an FPGA. As a first step I slightly modified your example, and it fails with an unexpected error. Here is a Colab https://colab.research.google.com/drive/1H_DGK2VjIKSNhNboW_XfqHr7kzXR-rrI in case anyone is interested in reproducing the problem.
Best, Denis.
Code and error:
! pip uninstall -y tensorflow
! pip install -q tf-nightly
! pip install -q tensorflow-model-optimization
import tempfile
import os
import tensorflow as tf
from tensorflow import keras
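# (My addition for reproducibility) Print the exact nightly build in use, since
# behaviour can change between nightlies; tf.__version__ is a standard attribute.
print(tf.__version__)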
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the MODIFIED model architecture.
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(28, 28)),
    keras.layers.Reshape(target_shape=(28, 28, 1)),
    keras.layers.Conv2D(filters=12, kernel_size=(3, 3), name='conv1'),
    keras.layers.BatchNormalization(),
    keras.layers.ReLU(),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
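# Quick shape check (my addition): summary() shows conv1 receiving (None, 28, 28, 1),
# which makes the ndim=2 / [None, 196] input reported in the error below all the
# more surprising.
model.summary()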
# Train the digit classification model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(
    train_images,
    train_labels,
    epochs=1,
    validation_split=0.1,
)
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz 11493376/11490434 [==============================] - 0s 0us/step 1688/1688 [==============================] - 4s 3ms/step - loss: 0.2025 - accuracy: 0.9386 - val_loss: 0.0901 - val_accuracy: 0.9735
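For reference, here is how I check the baseline (float) accuracy before quantizing; just a small sketch that only uses the model, test_images and test_labels defined above.

# Evaluate the float baseline on the held-out test set, so the quantization-aware
# model can later be compared against it.
_, baseline_accuracy = model.evaluate(test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_accuracy)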
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
# q_aware stands for quantization aware.
q_aware_model = quantize_model(model)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-5fc1d8762a1f> in <module>()
4
5 # q_aware stands for for quantization aware.
----> 6 q_aware_model = quantize_model(model)
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_model_optimization/python/core/quantization/keras/quantize.py in quantize_model(to_quantize)
136
137 annotated_model = quantize_annotate_model(to_quantize)
--> 138 return quantize_apply(annotated_model)
139
140
/usr/local/lib/python3.6/dist-packages/tensorflow_model_optimization/python/core/quantization/keras/quantize.py in quantize_apply(model)
401 # layer_quantize_map gets modified by the transformations.
402 transformed_model, layer_quantize_map = quantize_transform.apply(
--> 403 unwrapped_model, layer_quantize_map)
404
405 # TODO(pulkitb): Think more about how to introduce Default specific code.
/usr/local/lib/python3.6/dist-packages/tensorflow_model_optimization/python/core/quantization/keras/default_8bit/default_8bit_quantize_layout_transform.py in apply(self, model, layer_quantize_map)
65 return model_transformer.ModelTransformer(
66 model, transforms,
---> 67 layer_quantize_map.keys(), layer_quantize_map).transform()
/usr/local/lib/python3.6/dist-packages/tensorflow_model_optimization/python/core/quantization/keras/graph_transformations/model_transformer.py in transform(self)
550 else:
551 transformed_model = keras.Sequential.from_config(self._config,
--> 552 custom_objects)
553
554 for layer in transformed_model.layers:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/sequential.py in from_config(cls, config, custom_objects)
498 layer = layer_module.deserialize(layer_config,
499 custom_objects=custom_objects)
--> 500 model.add(layer)
501 if (not model.inputs and build_input_shape and
502 isinstance(build_input_shape, (tuple, list))):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
454 self._self_setattr_tracking = False # pylint: disable=protected-access
455 try:
--> 456 result = method(self, *args, **kwargs)
457 finally:
458 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/sequential.py in add(self, layer)
227 # If the model is being built continuously on top of an input layer:
228 # refresh its output.
--> 229 output_tensor = layer(self.outputs[0])
230 if len(nest.flatten(output_tensor)) != 1:
231 raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
892 # are casted, not before.
893 input_spec.assert_input_compatibility(self.input_spec, inputs,
--> 894 self.name)
895 if (any(isinstance(x, ragged_tensor.RaggedTensor) for x in input_list)
896 and not self._supports_ragged_inputs):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
178 'expected ndim=' + str(spec.ndim) + ', found ndim=' +
179 str(ndim) + '. Full shape received: ' +
--> 180 str(x.shape.as_list()))
181 if spec.max_ndim is not None:
182 ndim = x.shape.ndims
ValueError: Input 0 of layer conv1 is incompatible with the layer: expected ndim=4, found ndim=2. Full shape received: [None, 196]
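For context, this is the conversion step I intended to run next, following the standard QAT-to-TFLite flow. It is only a sketch (the file name mnist_qat.tflite is a placeholder) and is never reached, because quantize_model already fails as shown above.

# Intended next step (sketch): convert the QAT model to a quantized TFLite
# flatbuffer for the FPGA tooling. Not reached due to the ValueError above.
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_tflite_model = converter.convert()
with open('mnist_qat.tflite', 'wb') as f:
    f.write(quantized_tflite_model)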