FailedPreconditionError: Attempting to use uninitialized value Variable

I am trying to build a model that classifies images of cats and dogs, and I get a FailedPreconditionError. I used a separate module, LayersConstructor, to create the layers. When I run the code it reports that I am trying to use an uninitialized variable, even though I run the global variables initializer. Could someone please help me with this, and also explain how such errors can be debugged in TensorFlow? Thanks in advance; I have included the code below.
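
While debugging I noticed one thing that may be relevant: in graph-mode TF1, variable values live inside a tf.Session, so initialization is per-session, and variables initialized in one session are not initialized in any other. A minimal sketch of what I mean (separate from my code, using the same compat.v1 imports as my LayersConstructor module):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

v = tf.Variable(tf.zeros([2]), name='v')
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)      # v is now initialized, but only in THIS session

with tf.Session() as sess:
    # sess.run(v) here would raise FailedPreconditionError, because this
    # new session has its own, still-empty variable state.
    sess.run(init_op)      # it has to be initialized again in the new session
    print(sess.run(v))

My actual code: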

tf.reset_default_graph()

x = tf.placeholder(tf.float32, shape=[None, image_size_flat], name='x')
x_image = tf.reshape(x, [-1, image_size, image_size, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)
print(x_image.graph)
#for the first layer the input image will have the shape -[None,128,128,3]

layer_conv1,weights_conv1 = LayersConstructor.new_conv_layer(input = x_image,num_input_channels = num_channels,
                                                            filter_size = filter_size1,num_filters = num_filters1,
                                                            use_pooling = True,graph = tf.get_default_graph())

#for the first layer the output image will have the shape - [None,64,64,32], shape of weights is (3,3,3,32)

layer_conv2,weights_conv2 = LayersConstructor.new_conv_layer(input = layer_conv1,num_input_channels = num_filters1,
                                                             filter_size = filter_size2,num_filters = num_filters2,
                                                             use_pooling = True,graph = tf.get_default_graph())

#for the second layer, the output will have shape - [None,32,32,32], weights - [3,3,32,32]

layer_conv3,weights_conv3 = LayersConstructor.new_conv_layer(input = layer_conv2,num_input_channels = num_filters2,
                                                             filter_size = filter_size3,num_filters = num_filters3,
                                                             use_pooling = True,graph = tf.get_default_graph())


#the third layer will have an output of [None,16,16,64], weights - [3,3,32,64]

layer_flat,num_features = LayersConstructor.flatten_layer(layer_conv3)

#flatten layer will have an output of [None,16384]

#the number of features input to this will be num_features returned by the last layer: 16*16*64

layer_fc1 = LayersConstructor.new_fc_layer(input = layer_flat,num_inputs = num_features,
                                           num_outputs = fc_size,use_relu = True,graph = tf.get_default_graph())
#output for fc_1 will be [None,128]

layer_fc2 = LayersConstructor.new_fc_layer(input = layer_fc1,num_inputs = fc_size,
                                           num_outputs = num_classes,use_relu = False,graph = tf.get_default_graph())
#final layer will output as [None,2]

y_pred = tf.nn.softmax(layer_fc2)
y_pred_cls = tf.argmax(y_pred,axis = 1)

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = layer_fc2,labels = y_true)

cost = tf.reduce_mean(cross_entropy)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
correct_prediction = tf.equal(y_pred_cls,y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init_op = tf.global_variables_initializer()

train_batch_size = batch_size

acc_list = []
val_acc_list = []

# Counter for total number of iterations performed so far.
total_iterations = 0
iter_list = []

def optimize(num_iterations):

    #make sure that we use the global variable for total_iterations
    global total_iterations

    with tf.Session() as sess:
        sess.run(init_op)

    #start time used for printing time-usage
    start = time.time()

    best_val_loss = float("inf")
    patience = 0

    for i in range(total_iterations,total_iterations + num_iterations):

        # Get a batch of training examples.
        # x_batch now holds a batch of images and
        # y_true_batch are the true labels for those images.
        x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
        x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, flattened image shape]

        x_batch = x_batch.reshape(batch_size,image_size_flat)
        x_valid_batch = x_valid_batch.reshape(batch_size,image_size_flat)

        # Put the batch into a dict with the proper names
        # for placeholder variables in the TensorFlow graph.
        feed_dict_train = {x:x_batch,y_true:y_true_batch}
        feed_dict_validate = {x:x_valid_batch,y_true:y_valid_batch}

        with tf.Session() as sess:
            sess.run(optimizer,feed_dict_train)

        # Print status at end of each epoch (defined as a full pass through the training set).
        if i % int(data.train.num_examples/batch_size) == 0:
            val_loss = sess.run(cost, feed_dict=feed_dict_validate)
            epoch = int(i / int(data.train.num_examples/batch_size))

            acc, val_acc = print_progress(epoch, feed_dict_train, feed_dict_validate, val_loss)
            msg = "Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%}, Validation Loss: {3:.3f}"
            print(msg.format(epoch + 1, acc, val_acc, val_loss))
            print(acc)
            acc_list.append(acc)
            val_acc_list.append(val_acc)
            iter_list.append(epoch+1)

            if early_stopping:    
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    patience = 0
                else:
                    patience += 1
                if patience == early_stopping:
                    break

    # Update the total number of iterations performed.
    total_iterations += num_iterations

    # Ending time.
    end_time = time.time()

    # Difference between start and end-times.
    time_dif = end_time - start

    # Print the time-usage.
    print("Time elapsed: " + str(timedelta(seconds=int(round(time_dif)))))


#Evaluation and optimization 
optimize(num_iterations=1000)
print(acc_list)
# Plot accuracy over time
plt.plot(iter_list, acc_list, 'r--', label='CNN training accuracy per epoch', linewidth=4)
plt.title('CNN training accuracy per epoch')
plt.xlabel('Epoch')
plt.ylabel('CNN training accuracy')
plt.legend(loc='upper right')
plt.show()

LayersConstructor is given below:

import os
import pandas as pd
import numpy as np
import matplotlib.pyplot
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.compat.v1.disable_eager_execution()
from tensorflow.python.framework import ops
import warnings

warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ops.reset_default_graph()

def new_weights(shape, graph):
    with graph.as_default():
        weights = tf.Variable(tf.random.truncated_normal(shape, stddev = 0.05))
    return weights

#temp_weights = new_weights((2,3), tf.get_default_graph())
#temp_weights

def new_biases(length,graph):
    with graph.as_default():
        biases =  tf.Variable(tf.constant(0.05,shape = [length]))
    return biases

#temp_biases = new_biases(10, tf.get_default_graph())
#temp_biases

def new_conv_layer(input,              # the previous layer
                   num_input_channels, # num channels in previous layer
                   filter_size,        # height and width of filters
                   num_filters,        # number of filters
                   use_pooling = True, # use a pooling layer or not
                   graph = None
                   ):

    # Shape of the filter-weights for the convolution.
    # This format is determined by the TensorFlow API.
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    # Create new weights, i.e. filters, with the given shape.
    weights = new_weights(shape = shape, graph = graph)
    # Create new biases, one for each filter.
    biases = new_biases(length = num_filters, graph = graph)

    # Create the TensorFlow operation for convolution.
    # Note the strides are set to 1 in all dimensions.
    # The first and last stride must always be 1,
    # because the first is for the image-number and
    # the last is for the input-channel.
    # But e.g. strides=[1, 2, 2, 1] would mean that the filter
    # is moved 2 pixels across the x- and y-axis of the image.
    # The padding is set to 'SAME' which means the input image
    # is padded with zeroes so the size of the output is the same.
    layer = tf.nn.conv2d(input = input,
                         filter = weights,
                         strides = [1,1,1,1],
                         padding = 'SAME')
    # Add the biases to the results of the convolution.
    # A bias-value is added to each filter-channel.
    layer += biases

    # Use pooling to down-sample the image resolution?
    if use_pooling:
        # This is 2x2 max-pooling, which means that we
        # consider 2x2 windows and select the largest value
        # in each window. Then we move 2 pixels to the next window.
        layer = tf.nn.max_pool2d(input = layer,
                                 ksize = [1,2,2,1],
                                 strides = [1,2,2,1],
                                 padding = 'SAME')

    # Rectified Linear Unit (ReLU).
    # It calculates max(x, 0) for each input pixel x.
    # This adds some non-linearity to the formula and allows us
    # to learn more complicated functions.
    layer = tf.nn.relu(layer)

    # Note that ReLU is normally executed before the pooling,
    # but since relu(max_pool(x)) == max_pool(relu(x)) we can
    # save 75% of the relu-operations by max-pooling first.

    # We return both the resulting layer and the filter-weights
    # because we will plot the weights later.
    return layer, weights


def flatten_layer(layer):
    layer_shape = layer.get_shape()

    # The shape of the input layer is assumed to be:
    # layer_shape == [num_images, img_height, img_width, num_channels]


    # The number of features is: img_height * img_width * num_channels
    # We can use a function from TensorFlow to calculate this.    
    num_features = layer_shape[1:4].num_elements()

    # Reshape the layer to [num_images, num_features].
    # Note that we just set the size of the second dimension
    # to num_features and the size of the first dimension to -1
    # which means the size in that dimension is calculated
    # so the total size of the tensor is unchanged from the reshaping.
    layer_flat = tf.reshape(layer,[-1,num_features])

    # The shape of the flattened layer is now:
    # [num_images, img_height * img_width * num_channels]

    return layer_flat,num_features


def new_fc_layer(input,           # the previous layer
                 num_inputs,      # num of units in prev layer
                 num_outputs,     # num of output units
                 use_relu = True, # apply relu or not
                 graph = None):
    # Create weights and biases for the fc layer.
    weights = new_weights(shape = [num_inputs,num_outputs],graph = graph)
    biases = new_biases(length = num_outputs,graph = graph)

    # Calculate the layer as the matrix multiplication of
    # the input and weights, and then add the bias-values.
    layer = tf.matmul(input,weights) + biases

    if use_relu:
        layer = tf.nn.relu(layer)

    return layer    
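
Regarding the second part of my question (how to debug these): one helper I came across is tf.report_uninitialized_variables(), which can be run inside a session and returns the names of the variables that this particular session has not initialized yet. A short sketch of how it could be used, assuming the same graph-mode setup as above:

with tf.Session() as sess:
    # A 1-D tensor of variable names (byte strings) that are still
    # uninitialized in *this* session.
    print(sess.run(tf.report_uninitialized_variables()))
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.report_uninitialized_variables()))  # now empty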

The error:


  File "<ipython-input-41-3484afe8be87>", line 144, in <module>
    optimize(num_iterations=1000)

  File "<ipython-input-41-3484afe8be87>", line 105, in optimize
    sess.run(optimizer,feed_dict_train)

  File "C:\Users\Lenovo\Anaconda3\lib\site-packages\tensorflow_core\python\client\session.py", line 960, in run
    run_metadata_ptr)

  File "C:\Users\Lenovo\Anaconda3\lib\site-packages\tensorflow_core\python\client\session.py", line 1183, in _run
    feed_dict_tensor, options, run_metadata)

  File "C:\Users\Lenovo\Anaconda3\lib\site-packages\tensorflow_core\python\client\session.py", line 1361, in _do_run
    run_metadata)

  File "C:\Users\Lenovo\Anaconda3\lib\site-packages\tensorflow_core\python\client\session.py", line 1386, in _do_call
    raise type(e)(node_def, op, message)

FailedPreconditionError: Attempting to use uninitialized value Variable
     [[node Variable/read (defined at C:\Users\Lenovo\book -Practical Conv networks\chapter_3\cnn_cats and dogs\LayersConstructor.py:18) ]]
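
Update: I suspect the pattern that triggers this is that init_op is run in one with tf.Session() block, which closes as soon as the block exits, and every training step then opens a brand-new session in which the variables were never initialized, hence the FailedPreconditionError at sess.run(optimizer, ...). Below is a sketch (untested, reusing the names from my code above) of how optimize() could be restructured so that a single session performs both initialization and training:

def optimize(num_iterations):
    global total_iterations

    with tf.Session() as sess:
        # Initialize once, in the same session that runs the training ops.
        sess.run(init_op)

        for i in range(total_iterations, total_iterations + num_iterations):
            x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
            x_batch = x_batch.reshape(batch_size, image_size_flat)
            feed_dict_train = {x: x_batch, y_true: y_true_batch}

            # Same session: the variables initialized above are still alive here.
            sess.run(optimizer, feed_dict=feed_dict_train)

    total_iterations += num_iterations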