TensorFlow: trying a pretrained session on some other datasets
0 votes
/ 05 October 2018

I am new to deep learning, and I am trying to save and load my model and then test it on some video frames. I am clear on the logic, but the real problem I am facing is restoring the session and then testing the accuracy of the model.

How can I check the accuracy?

# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Input data files are available in the "../input/" directory.


mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
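# one_hot=True makes the label arrays 10-dimensional one-hot vectors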
x_train=mnist.train.images
y_train=mnist.train.labels
x_test=mnist.test.images 
y_test=mnist.test.labels

layer1_neuron=500
layer2_neuron=500
layer3_neuron=500
number_of_class=10
batch_size=200
# placeholders for the flattened 28*28 = 784 pixel images and their one-hot labels;
# named explicitly so the tensors can be looked up again after import_meta_graph
X=tf.placeholder('float',[None,784],name='X')
Y=tf.placeholder('float',name='Y')

# my neural network

def neural_network(inputs):
    hidden_layer_1={
        'weights':tf.Variable(tf.random_normal([784,layer1_neuron])),
        'biases': tf.Variable(tf.random_normal([layer1_neuron]))
         }
    hidden_layer_2={
        'weights':tf.Variable(tf.random_normal([layer1_neuron,layer2_neuron])),
        'biases':tf.Variable(tf.random_normal([layer2_neuron]))
        }
    hidden_layer_3={
        'weights':tf.Variable(tf.random_normal([layer2_neuron,layer3_neuron])),
        'biases':tf.Variable(tf.random_normal([layer3_neuron]))
        }
    output={
        'weights':tf.Variable(tf.random_normal([layer3_neuron,number_of_class])),
        'biases':tf.Variable(tf.random_normal([number_of_class]))
        }

    # forward pass: three fully connected ReLU hidden layers
    l1=tf.add(tf.matmul(inputs,hidden_layer_1['weights']),hidden_layer_1['biases'])
    l1=tf.nn.relu(l1)

    l2=tf.add(tf.matmul(l1,hidden_layer_2['weights']),hidden_layer_2['biases'])
    l2=tf.nn.relu(l2)

    l3=tf.add(tf.matmul(l2,hidden_layer_3['weights']),hidden_layer_3['biases'])
    l3=tf.nn.relu(l3)

    # linear output layer, named so the logits can be fetched after restoring
    output=tf.add(tf.matmul(l3,output['weights']),output['biases'],name='prediction')

    return output



epochs_completed = 0
index_in_epoch = 0
num_examples = x_train.shape[0]
# for splitting out batches of data
def next_batch(batch_size):

        global x_train
        global y_train
        global index_in_epoch
        global epochs_completed

        start = index_in_epoch
        index_in_epoch += batch_size

        # when all training data has been used, it is reshuffled randomly
        if index_in_epoch > num_examples:
            # finished epoch
            epochs_completed += 1
            # shuffle the data
            perm = np.arange(num_examples)
            np.random.shuffle(perm)
            x_train = x_train[perm]
            y_train = y_train[perm]
            # start next epoch
            start = 0
            index_in_epoch = batch_size
            assert batch_size <= num_examples
        end = index_in_epoch
        return x_train[start:end], y_train[start:end]
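
# illustrative usage (hypothetical call, not part of the training loop):
# batch_x, batch_y = next_batch(200)   # batch_x: (200, 784), batch_y: (200, 10)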


def training_neuralNetwork(X):


    total_epochs=10
    total_loss=0
    epoch_loss=0
    batch_size=200
    num_batch = int(np.ceil(mnist.train.num_examples /batch_size))
    prediction=neural_network(X)
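    # softmax cross-entropy between the raw logits and the one-hot labels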
    cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=Y))
    optimizer=tf.train.AdamOptimizer().minimize(cost)
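    # Saver is created after all variables exist, so every weight gets checkpointed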
    saver=tf.train.Saver() 
    with tf.Session() as sess:
        #sess.run(init_op) # for just saving model weights
        sess.run(tf.global_variables_initializer())
        for epoch in range (total_epochs):

            total_loss=0
            for _ in range (num_batch):
                batch_x,batch_y=next_batch(batch_size)
                _,epoch_loss=sess.run([optimizer,cost],feed_dict={X:batch_x,Y:batch_y})
                total_loss+=epoch_loss
            print('Epoch ',epoch, " loss = ",total_loss)

        print("Traning Complete!")



        # evaluate on the held-out test set: the fraction of matching argmax predictions
        correct=tf.equal(tf.argmax(prediction,1),tf.argmax(Y,1))
        accuracy=tf.reduce_mean(tf.cast(correct,'float'),name='accuracy')
        print('accuracy',accuracy.eval({X:x_test,Y:y_test}))

        save_path = saver.save(sess, "input/model", global_step=10)
        print("Model saved in path: %s" % save_path)



training_neuralNetwork(X)

and this is how I restore it:

tf_saver = tf.train.import_meta_graph('mnist_weights/model-10.meta')
tf_saver.restore(session, tf.train.latest_checkpoint('mnist_weights/'))
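
Here is what I think the complete restore-and-evaluate step should look like. This is only a sketch: it assumes the checkpoint files were moved into mnist_weights/, and that the placeholders and the accuracy op were given explicit names (X, Y, accuracy) before saving, as in the training code above; those tensor names are my assumption, not TensorFlow defaults.

with tf.Session() as sess:
    # rebuild the graph structure from the .meta file
    tf_saver = tf.train.import_meta_graph('mnist_weights/model-10.meta')
    # load the trained weights from the most recent checkpoint
    tf_saver.restore(sess, tf.train.latest_checkpoint('mnist_weights/'))

    graph = tf.get_default_graph()
    # fetch the tensors by the names they were given when the graph was built
    X = graph.get_tensor_by_name('X:0')
    Y = graph.get_tensor_by_name('Y:0')
    accuracy = graph.get_tensor_by_name('accuracy:0')

    # any dataset with the same shape works here:
    # images flattened to 784 floats, labels one-hot over 10 classes
    print('accuracy', sess.run(accuracy, feed_dict={X: x_test, Y: y_test}))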

How do I test another dataset against this restored model?
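
For the video frames I assume they first have to be brought into the same format as the training data: 28x28 grayscale, flattened to 784 values and scaled to [0, 1]. A rough preprocessing sketch (assuming OpenCV is installed, frames is a list of BGR images, and prediction is the named logits tensor fetched from the restored graph as above):

import cv2
import numpy as np

def frames_to_batch(frames):
    batch = []
    for frame in frames:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # drop the color channels
        small = cv2.resize(gray, (28, 28))              # match the MNIST resolution
        batch.append(small.astype(np.float32).ravel() / 255.0)  # flatten, scale to [0, 1]
    return np.array(batch)                              # shape: (len(frames), 784)

# inside the restored session:
# prediction = graph.get_tensor_by_name('prediction:0')
# logits = sess.run(prediction, feed_dict={X: frames_to_batch(frames)})
# predicted_digits = np.argmax(logits, axis=1)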
