TensorFlow error: Dimensions must be equal

My model class:

from tensorflow import keras
from tensorflow.keras import backend as K
import tensorflow as tf
import time

class ObjectLocalizer(object):

    def __init__(self, input_shape):

        tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
        alpha = 0.2

        def calculate_iou(target_boxes, pred_boxes):
            # Intersection-over-union of [xmin, ymin, xmax, ymax] boxes.
            xA = K.maximum(target_boxes[..., 0], pred_boxes[..., 0])
            yA = K.maximum(target_boxes[..., 1], pred_boxes[..., 1])
            xB = K.minimum(target_boxes[..., 2], pred_boxes[..., 2])
            yB = K.minimum(target_boxes[..., 3], pred_boxes[..., 3])
            interArea = K.maximum(0.0, xB - xA) * K.maximum(0.0, yB - yA)
            boxAArea = (target_boxes[..., 2] - target_boxes[..., 0]) * (target_boxes[..., 3] - target_boxes[..., 1])
            boxBArea = (pred_boxes[..., 2] - pred_boxes[..., 0]) * (pred_boxes[..., 3] - pred_boxes[..., 1])
            iou = interArea / (boxAArea + boxBArea - interArea)
            return iou

        def custom_loss(y_true, y_pred):
            # Mean squared error plus an IoU penalty (1 - IoU).
            mse = tf.losses.mean_squared_error(y_true, y_pred)
            iou = calculate_iou(y_true, y_pred)
            return mse + (1 - iou)

        def iou_metric(y_true, y_pred):
            return calculate_iou(y_true, y_pred)

        model_layers = [
            keras.layers.Conv2D(16, kernel_size=(3, 3), strides=1, input_shape=input_shape),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Conv2D(16, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),

            keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Conv2D(32, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),

            keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Conv2D(64, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),

            keras.layers.Conv2D(128, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Conv2D(128, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),

            keras.layers.Conv2D(256, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Conv2D(256, kernel_size=(3, 3), strides=1),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),

            keras.layers.Flatten(),

            keras.layers.Dense(1240),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Dense(640),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Dense(480),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Dense(120),
            keras.layers.LeakyReLU(alpha=alpha),
            keras.layers.Dense(62),
            keras.layers.LeakyReLU(alpha=alpha),

            keras.layers.Dense(7),
            keras.layers.LeakyReLU(alpha=alpha),
        ]

        self.__model = keras.Sequential(model_layers)
        self.__model.compile(
            optimizer=keras.optimizers.Adam(lr=0.0001),
            loss=custom_loss,
            metrics=[iou_metric]
        )

    def fit(self, X, Y, hyperparameters):
        initial_time = time.time()
        self.__model.fit(X, Y,
                         batch_size=hyperparameters['batch_size'],
                         epochs=hyperparameters['epochs'],
                         callbacks=hyperparameters['callbacks'],
                         validation_data=hyperparameters['val_data']
                         )
        final_time = time.time()
        eta = (final_time - initial_time)
        time_unit = 'seconds'
        if eta >= 60:
            eta = eta / 60
            time_unit = 'minutes'
        self.__model.summary()
        print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(hyperparameters['epochs'], eta, time_unit))

    def evaluate(self, test_X, test_Y):
        return self.__model.evaluate(test_X, test_Y)

    def predict(self, X):
        predictions = self.__model.predict(X)
        return predictions

    def save_model(self, file_path):
        self.__model.save(file_path)

    def load_model(self, file_path):
        self.__model = keras.models.load_model(file_path)

    def load_model_weights(self, file_path):
        self.__model.load_weights(file_path)
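
For reference, calculate_iou above is the standard intersection-over-union on [xmin, ymin, xmax, ymax] boxes. A minimal plain-Python sketch of the same formula (the box values below are made up purely for illustration):

def iou_check(box_a, box_b):
    # Same formula as calculate_iou, on plain Python lists [xmin, ymin, xmax, ymax].
    xA = max(box_a[0], box_b[0])
    yA = max(box_a[1], box_b[1])
    xB = min(box_a[2], box_b[2])
    yB = min(box_a[3], box_b[3])
    inter = max(0.0, xB - xA) * max(0.0, yB - yA)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter)

print(iou_check([0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.6, 0.6]))  # ~0.391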

My data-processing script:

from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import xmltodict
import numpy as np
import glob
import os


image_dim = 1000
images_dir = 'images'
output_dir = 'processed_data'

xml_filepaths = glob.glob( os.path.join( images_dir , '*.xml' ) )
jpg_filepaths = glob.glob( os.path.join( images_dir , '*.jpg' ) )

images = []
for filepath in jpg_filepaths:
    image = Image.open( filepath ).resize( ( image_dim , image_dim ) )
    images.append( np.asarray( image ) / 255 )

bboxes = []
classes = []
for filepath in xml_filepaths:
    bbox_dict = xmltodict.parse( open( filepath , 'rb' ) )
    classes.append( bbox_dict[ 'annotation' ][ 'object' ][ 'name' ] )
    bndbox = bbox_dict[ 'annotation' ][ 'object' ][ 'bndbox' ]
    bounding_box = [ 0.0 ] * 4
    bounding_box[0] = int(bndbox[ 'xmin' ]) / image_dim
    bounding_box[1] = int(bndbox[ 'ymin' ]) / image_dim
    bounding_box[2] = int(bndbox[ 'xmax' ]) / image_dim
    bounding_box[3] = int(bndbox[ 'ymax' ]) / image_dim
    bboxes.append( bounding_box )

bboxes = np.array( bboxes )
classes = np.array( classes )
encoder = LabelBinarizer()
classes_onehot = encoder.fit_transform( classes )

X = images
Y = np.concatenate( [ bboxes , classes_onehot ] , axis=1 )

train_features , test_features , train_labels , test_labels = train_test_split( X , Y , test_size=0.1 )

np.save( os.path.join( output_dir , 'x.npy' ) , train_features )
np.save( os.path.join( output_dir , 'y.npy' )  , train_labels )
np.save( os.path.join( output_dir , 'test_x.npy' ) , test_features )
np.save( os.path.join( output_dir , 'test_y.npy' ) , test_labels )
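
A quick sanity check on the label shape is useful at this point (a hypothetical snippet, assuming the arrays built above): each row of Y is 4 bounding-box values plus the one-hot columns, and with only one or two distinct classes LabelBinarizer emits a single column, so Y ends up 5 values wide.

print('bboxes:', bboxes.shape)                  # (num_samples, 4)
print('classes_onehot:', classes_onehot.shape)  # (num_samples, 1) for one or two distinct classes
print('Y:', Y.shape)                            # (num_samples, 4 + one-hot width)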

My main script:

from . import ObjectLocalizer
from PIL import Image , ImageDraw
import numpy as np

input_dim = 1000

X = np.load( 'processed_data/x.npy')
Y = np.load( 'processed_data/y.npy')
test_X = np.load( 'processed_data/test_x.npy')
test_Y = np.load( 'processed_data/test_y.npy')

print( X.shape )
print( Y.shape )
print( test_X.shape )
print( test_Y.shape )

localizer = ObjectLocalizer( input_shape=( input_dim , input_dim , 3 ) )
#localizer.load_model( 'models/model.h5')

parameters = {
    'batch_size' : 3,
    'epochs' : 10 ,
    'callbacks' : None ,
    'val_data' : ( test_X , test_Y )
}

localizer.fit( X , Y  , hyperparameters=parameters )
localizer.save_model( 'models/newmodel.h5')

My dataset has only 10 jpg images (dimensions 1000 x 1000 x 3) and 10 XML files (this is just a test). I want to do something like this: https://towardsdatascience.com/getting-started-with-bounding-box-regression-in-tensorflow-743e22d0ccb3

I need to understand this error:

Traceback (most recent call last):

  File "<ipython-input-2-0ba335cd51e8>", line 27, in <module>
    localizer.fit( X , Y  , hyperparameters=parameters )

  File "<ipython-input-1-cd4b01a76019>", line 95, in fit
    validation_data=hyperparameters['val_data']

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training.py", line 728, in fit
    use_multiprocessing=use_multiprocessing)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 324, in fit
    total_epochs=epochs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 123, in run_one_epoch
    batch_outs = execution_function(iterator)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 86, in execution_function
    distributed_function(input_fn))

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\def_function.py", line 457, in __call__
    result = self._call(*args, **kwds)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\def_function.py", line 503, in _call
    self._initialize(args, kwds, add_initializers_to=initializer_map)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\def_function.py", line 408, in _initialize
    *args, **kwds))

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\function.py", line 1848, in _get_concrete_function_internal_garbage_collected
    graph_function, _, _ = self._maybe_define_function(args, kwargs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\function.py", line 2150, in _maybe_define_function
    graph_function = self._create_graph_function(args, kwargs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\function.py", line 2041, in _create_graph_function
    capture_by_value=self._capture_by_value),

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\func_graph.py", line 915, in func_graph_from_py_func
    func_outputs = python_func(*func_args, **func_kwargs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\def_function.py", line 358, in wrapped_fn
    return weak_wrapped_fn().__wrapped__(*args, **kwds)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 73, in distributed_function
    per_replica_function, args=(model, x, y, sample_weights))

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\distribute\distribute_lib.py", line 760, in experimental_run_v2
    return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\distribute\distribute_lib.py", line 1787, in call_for_each_replica
    return self._call_for_each_replica(fn, args, kwargs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\distribute\distribute_lib.py", line 2132, in _call_for_each_replica
    return fn(*args, **kwargs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\autograph\impl\api.py", line 292, in wrapper
    return func(*args, **kwargs)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 264, in train_on_batch
    output_loss_metrics=model._output_loss_metrics)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_eager.py", line 311, in train_on_batch
    output_loss_metrics=output_loss_metrics))

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_eager.py", line 252, in _process_single_batch
    training=training))

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_eager.py", line 166, in _model_loss
    per_sample_losses = loss_fn.call(targets[i], outs[i])

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\losses.py", line 221, in call
    return self.fn(y_true, y_pred, **self._fn_kwargs)

  File "<ipython-input-1-cd4b01a76019>", line 25, in custom_loss
    mse = tf.losses.mean_squared_error(y_true, y_pred)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\losses.py", line 771, in mean_squared_error
    return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\gen_math_ops.py", line 11013, in squared_difference
    "SquaredDifference", x=x, y=y, name=name)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\op_def_library.py", line 793, in _apply_op_helper
    op_def=op_def)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\func_graph.py", line 548, in create_op
    compute_device)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\ops.py", line 3429, in _create_op_internal
    op_def=op_def)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\ops.py", line 1773, in __init__
    control_input_ops)

  File "C:\Users\.Admin\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\ops.py", line 1613, in _create_c_op
    raise ValueError(str(e))

ValueError: Dimensions must be equal, but are 7 and 5 for 'loss/leaky_re_lu_15_loss/SquaredDifference' (op: 'SquaredDifference') with input shapes: [3,7], [3,5].
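
Reading the message: for a batch of 3 samples, the SquaredDifference op inside the MSE part of custom_loss receives the model output of shape [3,7] (from the final Dense(7)) and the labels of shape [3,5] (4 bounding-box values plus what is presumably a single one-hot class column), and those last dimensions must match. An illustrative check, assuming the localizer and Y objects built in the scripts above:

# Illustrative only: compare the label width with the model's output width.
print('label width:', Y.shape[1])                      # 5 = 4 bbox values + one-hot columns
model = localizer._ObjectLocalizer__model              # reach the name-mangled private Keras model
print('model output width:', model.output_shape[-1])   # 7, from the final Dense(7) layer

For the loss to be computable at all, the size of that last Dense layer has to equal Y.shape[1].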