Good day everyone,
I am trying to implement a softmax classifier trained with the Adam optimizer using a gradient tape. I run into a problem when implementing the gradient tape and get the following error:
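For context, this is the eager-style training step I am trying to reproduce. This is a minimal sketch, not my actual code; the variable names (W, b, train_step) and the Fashion-MNIST shapes are just for illustration, and it assumes eager execution (the default in TF 2.x; on TF 1.x it must be enabled explicitly):

import tensorflow as tf

tf.enable_eager_execution()  # TF 1.x only; in TF 2.x eager is already the default

W = tf.Variable(tf.random.normal([10, 784]), name='w1')
b = tf.Variable(tf.zeros([10, 1]), name='b1')
optimizer = tf.keras.optimizers.Adam(0.01)

def train_step(x, y):
    # x: (784, batch) float32 images, y: (10, batch) one-hot labels
    with tf.GradientTape() as tape:
        logits = tf.transpose(tf.matmul(W, x) + b)  # (batch, 10)
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=logits, labels=tf.transpose(y)))
    # differentiate the loss w.r.t. the variables and apply the update
    grads = tape.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(grads, [W, b]))
    return loss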
Traceback (most recent call last):
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 303, in __init__
    fetch, allow_tensor=True, allow_operation=True))
  File "X:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py", line 3796, in as_graph_element
    return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
  File "X:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py", line 3885, in _as_graph_element_locked
    (type(obj).__name__, types_str))
TypeError: Can not convert a Adam into a Tensor or Operation.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:/Users/Predator/.PyCharm2019.3/config/scratches/scratch.py", line 225, in <module>
    feed_dict={X: train_images, Y: train_labels})
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 950, in run
    run_metadata_ptr)
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 1158, in _run
    self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 474, in __init__
    self._fetch_mapper = _FetchMapper.for_fetch(fetches)
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 264, in for_fetch
    return _ListFetchMapper(fetch)
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 373, in __init__
    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 373, in <listcomp>
    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 274, in for_fetch
    return _ElementFetchMapper(fetches, contraction_fn)
  File "X:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 307, in __init__
    (fetch, type(fetch), str(e)))
TypeError: Fetch argument has invalid type, must be a string or Tensor. (Can not convert a Adam into a Tensor or Operation.)
Here is my code:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from time import time
def preprocess_train_test_data2(train_images, train_labels, test_images, test_labels):
    # number of unique labels
    num_labels = np.unique(train_labels).size
    # conversion to categorical labels
    train_labels = tf.keras.utils.to_categorical(train_labels, num_labels)
    test_labels = tf.keras.utils.to_categorical(test_labels, num_labels)
    train_labels = train_labels.T
    test_labels = test_labels.T
    # flatten the images
    train_images = train_images.reshape(train_images.shape[0], -1).astype(np.float32)
    test_images = test_images.reshape(test_images.shape[0], -1).astype(np.float32)
    # normalize the train and test data
    train_images = (train_images / 255.0).T
    test_images = (test_images / 255.0).T
    return train_images, train_labels, test_images, test_labels
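# With Fashion-MNIST, train_images goes from (60000, 28, 28) uint8 to
# (784, 60000) float32 above, and train_labels from (60000,) to a one-hot
# (10, 60000); the test arrays end up in the same column-per-example layout.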
def forward_pass():
    # reset default graph
    # tf.compat.v1.reset_default_graph()
    X = tf.placeholder(tf.float32, [n_inputs, None], name='image')
    Y = tf.placeholder(tf.float32, [n_outputs, None], name='class')
    # weight and bias matrices
    W1 = tf.get_variable('w1', shape=(n_outputs, n_inputs))
    B1 = tf.get_variable('b1', shape=(n_outputs, 1), initializer=tf.zeros_initializer())
    # push training feature matrix through the final/output layer
    A3 = tf.add(tf.matmul(W1, X), B1)
    logits = tf.transpose(A3)
    labels = tf.transpose(Y)
    t = tf.exp(A3, name='exp')
    # sum_t = tf.reduce_sum(A3)
    H = tf.math.divide(t, tf.reduce_sum(A3))
    return A3, X, Y, logits, labels, H
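# Note: H is a hand-rolled softmax of A3 (its denominator sums A3 rather than
# t = exp(A3)); it is returned but not used by the cross-entropy loss below.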
def cross_entropy(logits, labels):
    # calculate the error
    error = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels)
    loss = tf.reduce_mean(error)
    return error, loss
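# softmax_cross_entropy_with_logits_v2 expects [batch, num_classes] inputs,
# which is why forward_pass() transposes A3 and Y before computing the loss.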
def calculate_accuracy(A3, Y):
    # # Gradient Descent Optimizer
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    # calculate correct predictions
    correct_predictions = tf.cast(tf.equal(tf.argmax(A3), tf.argmax(Y)), tf.float32)
    # calculate accuracy on the test set
    accuracy = tf.reduce_mean(correct_predictions)
    return accuracy
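# tf.argmax defaults to axis 0, which here is the class dimension, since A3
# and Y are laid out as (n_outputs, batch).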
def update_epoch_history(epoch, current_loss, acc, validation_accuracy, validation_loss,
                         epoch_history):
    """
    append details of each epoch to the appropriate lists in the history dictionary
    :param epoch:
    :param current_loss:
    :param acc:
    :param validation_accuracy:
    :param validation_loss:
    :param epoch_history:
    :return: the updated epoch_history
    """
    epoch_list = epoch_history['epoch']
    loss_list = epoch_history['loss']
    training_accuracy_list = epoch_history['training_accuracy']
    validation_accuracy_list = epoch_history['validation_accuracy']
    validation_loss_list = epoch_history['validation_loss']
    epoch_list.append(epoch)
    loss_list.append(current_loss)
    training_accuracy_list.append(acc)
    validation_accuracy_list.append(validation_accuracy)
    validation_loss_list.append(validation_loss)
    epoch_history['epoch'] = epoch_list
    epoch_history['loss'] = loss_list
    epoch_history['training_accuracy'] = training_accuracy_list
    epoch_history['validation_accuracy'] = validation_accuracy_list
    epoch_history['validation_loss'] = validation_loss_list
    return epoch_history
def plot_training_graph(epoch_history):
    """
    this plot code is copied from Week 9 lecture slides of 'Deep Learning'
    """
    n_epochs = len(epoch_history['epoch'])
    plt.style.use("ggplot")
    plt.figure(dpi=300)
    plt.plot(np.arange(0, n_epochs), epoch_history['loss'], label="train_loss")
    plt.plot(np.arange(0, n_epochs), epoch_history["validation_loss"], label="validation_loss")
    plt.plot(np.arange(0, n_epochs), epoch_history['training_accuracy'], label="train_acc")
    plt.plot(np.arange(0, n_epochs), epoch_history["validation_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.show()
def plot_final_graph(run_history):
    n_runs = len(run_history['test_acc'])
    # plt.style.use("ggplot")
    # plt.figure(dpi=300)
    # # plt.plot(np.arange(0, n_runs), run_history['test_acc'], label="Test Accuracy")
    # plt.plot(run_history["finish_time"], label="Finish Time")
    # plt.xticks(run_history['batch_size'])
    # plt.yticks(run_history['finish_time'])
    # plt.title("Finish Time and Test Accuracy")
    # plt.xlabel("Run #")
    # plt.ylabel("Accuracy")
    # plt.legend()
    plt.clf()
    plt.plot(run_history['finish_time'])
    plt.xticks(run_history['batch_size'])
    # plt.yticks(run_history['finish_time'])
    plt.xlabel("Batch size")
    plt.ylabel("Finish Time")
    plt.show()
def update_run_history(run_history, batch_size, test_acc, finish_time):
    test_acc_list = run_history['test_acc']
    finish_time_list = run_history['finish_time']
    batch_size_list = run_history['batch_size']
    test_acc_list.append(test_acc)
    finish_time_list.append(finish_time)
    batch_size_list.append(batch_size)
    run_history['test_acc'] = test_acc_list
    run_history['finish_time'] = finish_time_list
    run_history['batch_size'] = batch_size_list
    return run_history
if __name__ == '__main__':
    n_epochs = 40
    n_outputs = 10
    learning_rate = 0.01
    run_history = {
        'test_acc': list(),
        'finish_time': list(),
        'batch_size': list()
    }
    fashion_mnist = tf.keras.datasets.fashion_mnist
    (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
    # preprocess the data
    train_images, train_labels, test_images, test_labels = preprocess_train_test_data2(train_images,
                                                                                       train_labels,
                                                                                       test_images,
                                                                                       test_labels)
    n_inputs = train_images.shape[0]
    print("n_inputs: ", n_inputs)
    print("Data Extracted and Reshaped")
    print(train_images.shape, train_labels.shape, test_images.shape, test_labels.shape)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # a dictionary to save information for all epochs
        epoch_history = {
            'epoch': list(),
            'loss': list(),
            'training_accuracy': list(),
            'validation_accuracy': list(),
            'validation_loss': list()
        }
        start_time = time()
        for epoch in range(n_epochs):
            with tf.GradientTape() as tape:
                A3, X, Y, logits, labels, H = forward_pass()
                error, loss = cross_entropy(logits, labels)
                accuracy = calculate_accuracy(A3, Y)
                adam_optimizer = tf.keras.optimizers.Adam(learning_rate)
                start = 0
                _, current_loss, acc = sess.run([adam_optimizer, loss, accuracy],
                                                feed_dict={X: train_images, Y: train_labels})
            print("Epoch: ", (epoch + 1), "Loss: ", current_loss, " Training Accuracy: ", acc)
            validation_accuracy, validation_loss = sess.run([accuracy, loss],
                                                            feed_dict={X: test_images,
                                                                       Y: test_labels})
            print("Final validation_accuracy: ", validation_accuracy)
            epoch_history = update_epoch_history(epoch, current_loss, acc, validation_accuracy,
                                                 validation_loss, epoch_history)
        finish_time = time() - start_time
        print("Training Time: %f seconds" % finish_time)
        plot_training_graph(epoch_history)
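For comparison, the pure graph-mode pattern (like the commented-out GradientDescentOptimizer line in calculate_accuracy) would fetch the Operation returned by minimize() instead of the optimizer object itself. A rough sketch of what I mean, reusing loss, X, Y and the data arrays from above:

# minimize() returns an Operation, which is a valid fetch for sess.run
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, current_loss = sess.run([train_op, loss],
                               feed_dict={X: train_images, Y: train_labels})

Is something like this the only option in graph mode, or can tf.GradientTape be combined with sess.run somehow?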