ValueError: shapes are incompatible with tensorflow tfrecord
25 February 2019

I am trying to classify my records using tensorflow and TFRecord files. For this purpose I implemented the Python scripts below.

First, I write and read my records through a tfrecord file with the following script:

import time
import tensorflow as tf
import numpy as np
import readers
import pre_precessing
from app_flag import FLAGS

def write_and_encode(data_list, tfrecord_filename):
    writer = tf.python_io.TFRecordWriter(tfrecord_filename)
    for label, data_matrix in data_list:
        example = tf.train.Example(features=tf.train.Features(
            feature={
                "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
                "data_raw": 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[data_matrix.tostring()]))
            }
        ))
        writer.write(example.SerializeToString())

    writer.close()

def read_and_decode(tfrecord_filename):
    reader = tf.TFRecordReader()
    filename_queue = tf.train.string_input_producer([tfrecord_filename],)
    _, serialized_example = reader.read(filename_queue)
    feature = tf.parse_single_example(serialized_example,
                                      features={
                                          "label": tf.FixedLenFeature([], tf.int64),
                                          "data_raw": tf.FixedLenFeature([], tf.string)
                                      })
    data = tf.decode_raw(feature["data_raw"], tf.float64)
    data = tf.reshape(data, [FLAGS.image_rows, FLAGS.image_cols])
    return data, feature["label"]
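
To rule out a problem with the records themselves, this is a minimal sanity check I run on the file (my own debugging sketch, not part of the pipeline; it assumes the matrices were written as float64, as in write_and_encode above):

import numpy as np
import tensorflow as tf

# Debugging sketch: decode one serialized example straight from the file and
# check that its element count matches FLAGS.image_rows * FLAGS.image_cols.
for serialized in tf.python_io.tf_record_iterator("../resources/train_tfrecord"):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    raw = example.features.feature["data_raw"].bytes_list.value[0]
    matrix = np.frombuffer(raw, dtype=np.float64)  # float64, matching decode_raw above
    label = example.features.feature["label"].int64_list.value[0]
    print("label:", label, "elements:", matrix.size)  # expect image_rows * image_cols
    break  # one record is enough for the check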

Then, to train my stacked autoencoder, which is used for feature learning and classification, I implemented the following script:

def auto_encoder_layer(inputs,hidden_size,layer_name):
    """Create all tensors necessary for training an autoencoder layer and return a dictionary of the relevant tensors."""
    with tf.variable_scope(layer_name): #using variable scope makes collecting trainable vars for the layer easy
        """Create two tensors.  One for the encoding layer and one for the output layer.
        The goal is to have the output layer match the inputs it receives."""
        encoding_layer = tf.layers.dense(inputs,hidden_size,activation=tf.nn.relu,name='encoding_layer_{}'.format(hidden_size))
        output_layer = tf.layers.dense(encoding_layer,int(inputs.shape[1]), name='outputs')

        """Use the mean squared error of the difference between the inputs and the output layer to define the loss"""
        layer_loss = tf.losses.mean_squared_error(inputs, output_layer)

    all_vars = tf.trainable_variables() #this gets all trainable variables in the computational graph
    layer_vars = [v for v in all_vars if v.name.startswith(layer_name)] #select only the variables in this layer to train

    """create an op to minimize the MSE"""
    optimizer = tf.train.AdamOptimizer().minimize(layer_loss, var_list=layer_vars, name='{}_opt'.format(layer_name))

    """Create a summary op to monitor the loss of this layer"""
    loss_summ = tf.summary.scalar('{}_loss'.format(layer_name), layer_loss)
    return {'inputs': inputs, 'encoding_layer': encoding_layer, 'output_layer': output_layer, 'layer_loss': layer_loss, 'optimizer': optimizer}

def train_input_fn():

    tfrecord_file = "../resources/train_tfrecord"
    dataset = tf.data.TFRecordDataset(tfrecord_file)
    dataset = dataset.map(parser)

    train_dataset = dataset.repeat(FLAGS.num_epochs).batch(FLAGS.batch_size)
    train_iterator = train_dataset.make_one_shot_iterator()

    features, labels = train_iterator.get_next()
    return features, labels

def parser(record_line):

    features = {
        "label": tf.FixedLenFeature([], tf.int64),
        "data_raw": tf.FixedLenFeature([], tf.string)
    }
    parsed = tf.parse_single_example(record_line, features=features)
    label = tf.cast(parsed["label"], tf.int32) - 1
    data = tf.decode_raw(parsed["data_raw"], tf.float64)
    data = tf.reshape(data, [FLAGS.image_rows, FLAGS.image_cols])
    data = tf.cast(data, tf.float32)
    return data, label


def eval_input_fn():
    tfrecord_file = "../resources/test_tfrecord"
    dataset = tf.data.TFRecordDataset(tfrecord_file)
    dataset = dataset.map(parser)
    # num_epochs = 5
    batch_size = 5

    eval_dataset = dataset.batch(FLAGS.batch_size)
    eval_iterator = eval_dataset.make_one_shot_iterator()

    features, labels = eval_iterator.get_next()
    return features, labels


def train_layer(output_layer, layer_loss, optimizer):
    """Train each encoding layer for 1000 steps"""
    layer_name = output_layer.name.split('/')[0]
    print('Pretraining {}'.format(layer_name))
    x, y_labels = train_input_fn()
    input_l = tf.reshape(x, [-1, FLAGS.image_rows, FLAGS.image_cols, 1])
    instance_batch, label_batch = tf.train.shuffle_batch([input_l], batch_size=5, capacity=200, min_after_dequeue=100)
    _out_layer, _layer_loss, _ = sess.run([output_layer, layer_loss, optimizer], feed_dict={x: instance_batch, y_labels: label_batch})
    #print(_layer_loss)
    print('layer finished')


#define the number of layers and batch size to use in the model
num_layers = 6

# download and import data
x, y_labels = train_input_fn()

#store each of the layers in a list so we can iterate through and train them later
model_layers = []

"""For each encoding layer, make the number of hidden units half the number of input units.
This will force the encoding layer to learn a simpler representation of the data."""
hidden_size = x.shape[1].value/2
next_layer_inputs = x

#create all of the layers for the model
for layer in range(0, num_layers):
    layer_name = 'layer_{}'.format(layer)
    model_layers.append(auto_encoder_layer(next_layer_inputs, hidden_size, layer_name))
    """After training a layer we will use the encoding from that layer as inputs to the next.
    The outputs from that layer are no longer used."""
    next_layer_inputs = model_layers[-1]['encoding_layer']
    hidden_size = int(hidden_size/2)

"""""Now that all of the layers for the stacked auto encoder have been created add one final layer
for classification.  The output can take on one of 10 values, 0-9, and the labels are one-hot encoded, so
use a softmax layer for the prediction."""
last_layer = model_layers[-1]
outputs = last_layer['encoding_layer']
with tf.variable_scope('predicted_class'):
    y = tf.layers.dense(outputs,5,activation=tf.nn.softmax)

#For the loss use cross entropy
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_labels, logits=y))

"""create a global step counter to keep track of epochs during training and add this to the
net_op below.  This will increment the step counter each time net_op is run."""
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
            loss=cross_entropy,
            global_step=tf.train.get_global_step()
        )

#create ops to check accuracy
correct_prediction = tf.equal(tf.argmax(y_labels, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),name='accuracy')
#add a summary op for logging
accuracy_summ = tf.summary.scalar('train_accuracy',accuracy)

"""Use a MonitoredTrainingSession for running the computations.  It makes running on distributed systems
possible, handles checkpoints, saving summaries, and restoring from crashes easy."""

#create hooks to pass to the session.  These can be used for adding additional calculations, logging, etc.
#This hook simply tells the session how many steps to run
hooks=[tf.train.StopAtStepHook(last_step=10000)]

#This command collects all summary ops that have been added to the graph and prepares them to run in the next session
tf.summary.merge_all()

logs_dir = 'logs'



with tf.train.MonitoredTrainingSession(hooks=hooks, checkpoint_dir=logs_dir,save_summaries_steps=100) as sess:

    start_time = time.time()

    """First train each layer one at a time, freezing weights from previous layers.
    This was accomplished by declaring which variables to update when each layer optimizer was defined."""
    for layer_dict in model_layers:
        output_layer = layer_dict['output_layer']
        layer_loss = layer_dict['layer_loss']
        optimizer = layer_dict['optimizer']
        train_layer( output_layer, layer_loss, optimizer)



#examine the final test set accuracy by loading the trained model, along with the last saved checkpoint
with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('logs/model.ckpt-10000.meta')
    new_saver.restore(sess, tf.train.latest_checkpoint('./logs'))
    instance, label = read_and_decode("../resources/test_tfrecord")
    instance_batch, label_batch = tf.train.shuffle_batch([instance, label], batch_size=3,
                                                         capacity=200, min_after_dequeue=100, num_threads=2)
    _accuracy_test = accuracy.eval(session=sess,feed_dict={x:instance_batch,y_labels:label_batch})
    print('test_set accuracy: {}'.format(_accuracy_test))

duration = (time.time() - start_time)/60
print("Run complete.  Total time was {} min".format(duration))

When I run my code, I get an error saying the shapes are incompatible:

ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (?, 39, 39) and (?, 39, 662) are incompatible
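
To narrow down which layer triggers the mismatch, I used the following debugging sketch (my addition, not part of the script above); it mimics the layer construction and prints the static shapes that each layer's mean squared error loss would compare:

# Debugging sketch: rebuild the dense layers the same way as auto_encoder_layer
# does (without the loss op, so nothing raises) and print the shapes per layer.
x, _ = train_input_fn()
next_layer_inputs = x
hidden_size = int(x.shape[1].value / 2)  # int() added so tf.layers.dense gets an integer
for layer in range(num_layers):
    with tf.variable_scope('debug_layer_{}'.format(layer)):
        encoding = tf.layers.dense(next_layer_inputs, hidden_size, activation=tf.nn.relu)
        reconstruction = tf.layers.dense(encoding, int(next_layer_inputs.shape[1]))
    print('layer {}: inputs {} vs reconstruction {}'.format(
        layer, next_layer_inputs.shape, reconstruction.shape))
    next_layer_inputs = encoding
    hidden_size = int(hidden_size / 2)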

...