Why are the predictions different for the Autoencoder and Encoder + Decoder?
1 vote
/ 13 October 2019

I am building a 1D CNN Autoencoder in Keras, following the advice in this question, where the Encoder and Decoder are kept separate. My goal is to reuse the decoder after the autoencoder has been trained. The central layer of my autoencoder is a Dense layer, because I would like to examine it afterwards.

My problem is that if I compile and fit the whole autoencoder, written as Decoder()(Encoder()(x)) where x is the input, I get a different prediction when I call

autoencoder.predict(training_set)

than when I first encode the training set into the central features and then let the decoder decode them. These two approaches should give identical answers once the autoencoder has been trained.

from tensorflow.keras.layers import Input, Dense, BatchNormalization, Flatten, Lambda, Activation, Conv1D, MaxPooling1D, UpSampling1D, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from tensorflow.keras.layers import GaussianNoise
import keras.backend as K  # NB: standalone Keras backend alongside tensorflow.keras (see the error in EDIT 1)
from tensorflow.keras.layers import Add

import tensorflow as tf

import scipy.io
import sys
import random  # used by reset_seeds() in the EDITs below
import h5py    # used by load_weights() in the EDITs below
import matplotlib.pyplot as plt
import numpy as np
import copy


training = # some training set, 1500 samples of 501 points each
testing = # some testing set, 500 samples of 501 points each

# reshaping for CNN
training = np.reshape(training, [1500, 501, 1])
testing = np.reshape(testing, [500, 501, 1])


# normalize input
X_mean = training.mean()
training -= X_mean
X_std = training.std()
training /= X_std


copy_of_test = copy.copy(testing)
testing -= X_mean
testing /= X_std

### MODEL ###

def Encoder():
    encoder_input = Input(batch_shape=(None, 501, 1))  
    e1 = Conv1D(256,3, activation='tanh', padding='valid')(encoder_input)
    e2 = MaxPooling1D(2)(e1)
    e3 = Conv1D(32,3, activation='tanh', padding='valid')(e2)
    e4 = MaxPooling1D(2)(e3)
    e5 = Flatten()(e4)
    encoded = Dense(32,activation = 'tanh')(e5)
    return Model(encoder_input, encoded)


def Decoder():
    encoded_input = Input(shape=(32,))  
    encoded_reshaped = Reshape((32,1))(encoded_input)
    d1 = Conv1D(32, 3, activation='tanh', padding='valid', name='decod_conv1d_1')(encoded_reshaped)
    d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
    d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
    d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
    d5 = Flatten(name='decod_flatten')(d4)
    d6 = Dense(501, name='decod_dense1')(d5)
    decoded = Reshape((501,1), name='decod_reshape')(d6)
    return Model(encoded_input, decoded)


# define input to the model:
x = Input(batch_shape=(None, 501, 1))
y = Input(shape=(32,))

# make the model:
autoencoder = Model(x, Decoder()(Encoder()(x)))

# compile the model:
autoencoder.compile(optimizer='adam', loss='mse')
for layer in autoencoder.layers: print(K.int_shape(layer.output))


epochs = 100
batch_size = 100
validation_split = 0.2
# train the model
history = autoencoder.fit(x = training, y = training,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split)

# Encoder
encoder = Model(inputs=x, outputs=Encoder()(x), name='encoder')
print('enc:')
for layer in encoder.layers: print(K.int_shape(layer.output))
features = encoder.predict(training) # features

# Decoder
decoder = Model(inputs=y, outputs=Decoder()(y), name='decoder')
print('dec:')
for layer in decoder.layers: print(K.int_shape(layer.output))
score = decoder.predict(features)  # decode the encoded features
score = np.squeeze(score)    

predictions = autoencoder.predict(training)
predictions = np.squeeze(predictions)

# plotting one random case
# score should be equal to predictions!
# because score is obtained from the trained decoder acting on the encoded features, while predictions are obtained from the Autoencoder acting on the training set
plt.plot(score[100], label='eD')
plt.plot(predictions[100], label='AE')
plt.legend()
plt.show()
plt.close()

EDIT after OverLordGoldDragon's answer:

I implemented the suggestion from the answer by writing the following in the same file:

def reset_seeds():
    np.random.seed(1)
    random.seed(2)
    if tf.__version__[0] == '2':
        tf.random.set_seed(3)
    else:
        tf.set_random_seed(3)
    print("RANDOM SEEDS RESET")


def Encoder():
    encoder_input = Input(batch_shape=(None, 501, 1))  
    e1 = Conv1D(256,3, activation='tanh', padding='valid')(encoder_input)
    e2 = MaxPooling1D(2)(e1)
    e3 = Conv1D(32,3, activation='tanh', padding='valid')(e2)
    e4 = MaxPooling1D(2)(e3)
    e5 = Flatten()(e4)
    encoded = Dense(32,activation = 'tanh')(e5)
    encoded = Reshape((32,1))(encoded)
    return Model(encoder_input, encoded)


def Decoder():
    encoded_input = Input(shape=(32,))  
    encoded_reshaped = Reshape((32,1))(encoded_input)
    d1 = Conv1D(32, 3, activation='tanh', padding='valid', name='decod_conv1d_1')(encoded_reshaped)
    d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
    d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
    d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
    d5 = Flatten(name='decod_flatten')(d4)
    d6 = Dense(501, name='decod_dense1')(d5)
    decoded = Reshape((501,1), name='decod_reshape')(d6)
    return Model(encoded_input, decoded)


def DecoderAE(encoder_input, encoded_input):
    encoded_reshaped = Reshape((32,1))(encoded_input)
    d1 = Conv1D(32, 3, activation='tanh', padding='valid',
                       name='decod_conv1d_1')(encoded_reshaped)
    d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
    d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
    d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
    d5 = Flatten(name='decod_flatten')(d4)
    d6 = Dense(501, name='decod_dense1')(d5)
    decoded = Reshape((501,1), name='decod_reshape')(d6)
    return Model(encoder_input, decoded)


def load_weights(model, filepath):
    with h5py.File(filepath, mode='r') as f:
        file_layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
        model_layer_names = [layer.name for layer in model.layers]

        weight_values_to_load = []
        for name in file_layer_names:
            if name not in model_layer_names:
                print(name, "is ignored; skipping")
                continue
            g = f[name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]

            weight_values = []
            if len(weight_names) != 0:
                weight_values = [g[weight_name] for weight_name in weight_names]
            try:
                layer = model.get_layer(name=name)
            except:
                layer = None
            if layer is not None:
                symbolic_weights = (layer.trainable_weights + 
                                    layer.non_trainable_weights)
                if len(symbolic_weights) != len(weight_values):
                    print('Model & file weights shapes mismatch')
                else:
                    weight_values_to_load += zip(symbolic_weights, weight_values)

        K.batch_set_value(weight_values_to_load)


X = np.random.randn(10, 501, 1)
reset_seeds()
encoder = Encoder()
AE = DecoderAE(encoder.input, encoder.output)
AE.compile(optimizer='adam', loss='mse')


epochs = 10
batch_size = 100
validation_split = 0.2
# train the model
history = AE.fit(x = training, y = training,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split)


reset_seeds()
encoder = Encoder()
decoder = Decoder()

# Test equality
features = encoder.predict(X)
features = np.squeeze(features) # had to add this otherwise it would complain because of wrong shapes
score = decoder.predict(features) 
predictions = AE.predict(X)
print(np.sum(score - predictions))
# I am actually getting values >> 1


AE.save_weights('autoencoder_weights.h5')
AE_saved_weights = AE.get_weights()

decoder = Decoder()
load_weights(decoder, 'autoencoder_weights.h5')  # see "reference"
decoder_loaded_weights = decoder.get_weights()

AE_decoder_weights = AE_saved_weights[-len(decoder_loaded_weights):]
for w1, w2 in zip(AE_decoder_weights, decoder_loaded_weights):
    print(np.sum(w1 - w2))

The code trains the AE, however:

1) I get values >> 1 for the difference between score and predictions

2) the code then crashes with the output below (see the notes after the traceback):

(u'input_1', 'is ignored; skipping')
(u'conv1d', 'is ignored; skipping')
(u'max_pooling1d', 'is ignored; skipping')
(u'conv1d_1', 'is ignored; skipping')
(u'max_pooling1d_1', 'is ignored; skipping')
(u'flatten', 'is ignored; skipping')
(u'dense', 'is ignored; skipping')
(u'reshape', 'is ignored; skipping')
(u'reshape_1', 'is ignored; skipping')
Traceback (most recent call last):
  File "Autoenc.py", line 256, in <module>
    load_weights(decoder, 'autoencoder_weights.h5')  # see "reference"
  File "Autoenc.py", line 219, in load_weights
    K.batch_set_value(weight_values_to_load)
  File "/home/user/anaconda3/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 2725, in batch_set_value
    assign_placeholder = tf.placeholder(tf_dtype,
AttributeError: 'module' object has no attribute 'placeholder'
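
Two notes on these failures. On (1), values >> 1 are expected at this point: AE.fit has updated the autoencoder's weights, while the Encoder() and Decoder() instances created afterwards (even with reset seeds) still hold untrained initial weights. On (2), the traceback runs through the standalone Keras backend (keras/backend/tensorflow_backend.py), whose call to tf.placeholder fails; this points to the import keras.backend as K line, which mixes the standalone Keras backend into a file whose models are built with tensorflow.keras. A minimal sketch of keeping everything on one backend, assuming the rest of the imports stay on tensorflow.keras:

# use the tf.keras backend so that K.batch_set_value addresses the same
# graph/session as the models, instead of the standalone Keras one
import tensorflow.keras.backend as K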

EDIT 2

Here is my new file after the latest comment by @OverLordGoldDragon; the function definitions are unchanged from EDIT 1, so only the driver code is repeated below. I get the error that follows it.

# reset_seeds(), Encoder(), Decoder(), DecoderAE() and load_weights()
# are identical to the definitions in EDIT 1 above


X = np.random.randn(10, 501, 1)
reset_seeds()
encoder = Encoder()
AE = DecoderAE(encoder.input, encoder.output)
AE.compile(optimizer='adam', loss='mse')


epochs = 2
batch_size = 100
validation_split = 0.2
# train the model
history = AE.fit(x = training, y = training,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=validation_split)


reset_seeds()
encoder = Encoder()
decoder = Decoder()
decoder.save_weights('decoder_weights.h5')



AE.save_weights('autoencoder_weights.h5')
AE_saved_weights = AE.get_weights()

decoder = Decoder()
load_weights(decoder, 'autoencoder_weights.h5')  # see "reference"
decoder_loaded_weights = decoder.get_weights()

# Test equality
features = encoder.predict(X)
features = np.squeeze(features) 
score = decoder.predict(features) 
predictions = AE.predict(X)
print(np.sum(score - predictions))


AE_decoder_weights = AE_saved_weights[-len(decoder_loaded_weights):]
for w1, w2 in zip(AE_decoder_weights, decoder_loaded_weights):
    print(np.sum(w1 - w2))



Traceback (most recent call last):
  File "Autoenc_pazzo.py", line 251, in <module>
    decoder_loaded_weights = decoder.get_weights()
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/training.py", line 153, in get_weights
    return super(Model, self).get_weights()
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1130, in get_weights
    return backend.batch_get_value(params)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/backend.py", line 3010, in batch_get_value
    return get_session(tensors).run(tensors)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 950, in run
    run_metadata_ptr)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1173, in _run
    feed_dict_tensor, options, run_metadata)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1350, in _do_run
    run_metadata)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1370, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Error while reading resource variable decod_conv1d_1_2/bias from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/decod_conv1d_1_2/bias/N10tensorflow3VarE does not exist.
     [[node decod_conv1d_1_2/bias/Read/ReadVariableOp (defined at Autoenc_pazzo.py:168) ]]

Original stack trace for u'decod_conv1d_1_2/bias/Read/ReadVariableOp':
  File "Autoenc_pazzo.py", line 249, in <module>
    decoder = Decoder()
  File "Autoenc_pazzo.py", line 168, in Decoder
    d1 = Conv1D(32, 3, activation='tanh', padding='valid', name='decod_conv1d_1')(encoded_reshaped)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 591, in __call__
    self._maybe_build(inputs)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1881, in _maybe_build
    self.build(input_shapes)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/layers/convolutional.py", line 174, in build
    dtype=self.dtype)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 384, in add_weight
    aggregation=aggregation)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/training/tracking/base.py", line 663, in _add_variable_with_custom_getter
    **kwargs_for_getter)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 155, in make_variable
    shape=variable_shape if variable_shape.rank else None)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 259, in __call__
    return cls._variable_v1_call(*args, **kwargs)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 220, in _variable_v1_call
    shape=shape)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 198, in <lambda>
    previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 2495, in default_variable_creator
    shape=shape)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 263, in __call__
    return super(VariableMetaclass, cls).__call__(*args, **kwargs)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 460, in __init__
    shape=shape)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 649, in _init_from_args
    value = self._read_variable_op()
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 935, in _read_variable_op
    self._dtype)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py", line 587, in read_variable_op
    "ReadVariableOp", resource=resource, dtype=dtype, name=name)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
    op_def=op_def)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
    return func(*args, **kwargs)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3616, in create_op
    op_def=op_def)
  File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2005, in __init__
    self._traceback = tf_stack.extract_stack()
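
A hedged note on this traceback: the missing resource decod_conv1d_1_2/bias belongs to the third Decoder() instance created in the script (hence the _2 suffix), and the FailedPreconditionError means get_weights() is reading it through a session in which it was never initialized. This is again consistent with the mixed keras / tensorflow.keras backends noted under EDIT 1, where the two libraries maintain separate sessions.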

1 Answer

0 votes
/ 14 October 2019

The main problem with your code is the absence of random seeding. I refer you to this answer for the full explanation, and focus on your specific case here.


Explanation:

  • The order of model instantiation matters, as it changes the initialized weights (a short sketch follows this list)
  • Consequently, if you train multiple models, you must reset the seed both at model instantiation and before training
  • Your AE, Encoder, and Decoder definitions use Input redundantly and complicate introspection (e.g. .summary()); Encoder() and Decoder() already take care of it
  • To verify that the loaded decoder weights match the AE's saved trained decoder weights, see the example below
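
A minimal sketch of the first two points, assuming the Encoder() / Decoder() definitions from the question and the reset_seeds() helper listed at the end of this answer:

reset_seeds()
enc_a = Encoder()  # first model created after the reset
dec_a = Decoder()  # second model consumes later draws from the RNG stream

reset_seeds()
enc_b = Encoder()  # same position in the RNG stream as enc_a

# same seed + same instantiation order -> identical initial weights
print(all(np.allclose(w1, w2) for w1, w2 in
          zip(enc_a.get_weights(), enc_b.get_weights())))  # True

enc_c = Encoder()  # created without a reset -> different RNG position
print(all(np.allclose(w1, w2) for w1, w2 in
          zip(enc_a.get_weights(), enc_c.get_weights())))  # False, in general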

SOLUTION:

reset_seeds()
X = np.random.randn(10, 501, 1)  # '10' arbitrary
x = Input(batch_shape=(None, 501, 1))

reset_seeds()
encoder = Encoder()
decoder = Decoder()
autoencoder = Model(x, decoder(encoder(x)))
autoencoder.compile(optimizer='adam', loss='mse')

reset_seeds()
encoder = Encoder()
decoder = Decoder()
predictions = autoencoder.predict(X)

features = encoder.predict(X)
score = decoder.predict(features)

print(np.sum(score - predictions))
# 0.0  <-- 100% agreement
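
Note that nothing is trained in this snippet: the agreement comes from resetting the seed before each round of instantiation, so the stand-alone encoder and decoder reproduce exactly the initial weights of the autoencoder's submodels. After an actual fit(), freshly re-instantiated models would again hold untrained weights; to compare against a trained AE you must share its layers or load its weights, as in the example below.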

Example of saving / loading + the preferred AE definition; reference

Your AE definition limits introspection via e.g. .summary(); define it as below instead.

X = np.random.randn(10, 501, 1)
reset_seeds()
encoder = Encoder()
AE = DecoderAE(encoder.input, encoder.output)
AE.compile(optimizer='adam', loss='mse')

reset_seeds()
encoder = Encoder()
decoder = Decoder()

# Test equality
features = encoder.predict(X)
score = decoder.predict(features) 
predictions = AE.predict(X)
print(np.sum(score - predictions))
# 0.0  <-- exact or close to
AE.save_weights('autoencoder_weights.h5')
AE_saved_weights = AE.get_weights()

decoder = Decoder()
load_weights(decoder, 'autoencoder_weights.h5')  # see "reference"
decoder_loaded_weights = decoder.get_weights()

AE_decoder_weights = AE_saved_weights[-len(decoder_loaded_weights):]
for w1, w2 in zip(AE_decoder_weights, decoder_loaded_weights):
    print(np.sum(w1 - w2))
# 0.0
# 0.0
# ...
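
The tail slice AE_saved_weights[-len(decoder_loaded_weights):] works because Model.get_weights() returns the arrays in layer order and the decoder layers are the last ones built into the AE. A sketch of a more explicit, name-based check, assuming the shared decod_* layer names above (only the layers that carry weights are compared):

for name in ('decod_conv1d_1', 'decod_conv1d_2', 'decod_dense1'):
    w_ae = AE.get_layer(name).get_weights()
    w_dec = decoder.get_layer(name).get_weights()
    print(name, all(np.allclose(a, b) for a, b in zip(w_ae, w_dec)))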

Functions used:

def reset_seeds():
    np.random.seed(1)
    random.seed(2)
    if tf.__version__[0] == '2':
        tf.random.set_seed(3)
    else:
        tf.set_random_seed(3)
    print("RANDOM SEEDS RESET")

def DecoderAE(encoder_input, encoded_input):
    encoded_reshaped = Reshape((32,1))(encoded_input)
    d1 = Conv1D(32, 3, activation='tanh', padding='valid',
                       name='decod_conv1d_1')(encoded_reshaped)
    d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
    d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
    d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
    d5 = Flatten(name='decod_flatten')(d4)
    d6 = Dense(501, name='decod_dense1')(d5)
    decoded = Reshape((501,1), name='decod_reshape')(d6)
    return Model(encoder_input, decoded)
...