Я смотрел другие учебные пособия, и они могут получить точность до 90% всего за 10 эпох. Поэтому я предполагаю, что в моей реализации что-то не так, потому что моя точность действительно низкая, она составляет менее 1% после 10 эпох и едва увеличивается. Я использую набор данных MNIST, и любая помощь будет принята с благодарностью
from PIL import Image
from tensorflow.keras import datasets, layers, models
from keras.layers import Dense, Dropout, Flatten
import matplotlib.pyplot as plt
import numpy as np
import keras
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train/255
X_test = X_test/255
X_train_processed = np.reshape(X_train,[X_train.shape[0],X_train.shape[1],X_train.shape[2],1])
X_test_processed = np.reshape(X_test,[X_test.shape[0],X_test.shape[1],X_test.shape[2],1])
X_train_processed = np.pad(X_train_processed, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_test_processed = np.pad(X_test_processed, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_train_processed = tf.image.resize(
images = X_train_processed,
size = np.array([32,32])
)
X_test_processed = tf.image.resize(
images = X_test_processed,
size = np.array([32,32])
)
Y_train_processed = tf.one_hot(y_train,10)
Y_test_processed = tf.one_hot(y_test,10)
Lnet = tf.keras.Sequential()
#First Layer
Lnet.add(
tf.keras.layers.Conv2D(
filters = 6,
kernel_size = (5,5),
strides = (1,1),
padding = 'valid',
activation = 'relu',
#kernel_initializer = keras.initializers.glorot_normal(seed=0)
)
)
Lnet.add(
tf.keras.layers.AveragePooling2D(
pool_size = (2,2),
strides = (2,2),
padding = 'valid'
)
)
#Second Layer
Lnet.add(
tf.keras.layers.Conv2D(
filters = 16,
kernel_size = (5,5),
strides = (1,1),
padding = 'valid',
activation = 'relu'#,
#kernel_initializer = keras.initializers.glorot_normal(seed=0)
)
)
Lnet.add(
tf.keras.layers.AveragePooling2D(
pool_size = (2,2),
strides = (2,2),
padding = 'valid'
)
)
Lnet.add(tf.keras.layers.Flatten())
Lnet.add(
tf.keras.layers.Dense(
units = 120,
activation = 'relu'
)
)
Lnet.add(tf.keras.layers.Flatten())
Lnet.add(
tf.keras.layers.Dense(
units = 84,
activation = 'relu'
)
)
Lnet.add(
tf.keras.layers.Dense(
units = 10,
activation = 'softmax'
)
)
Lnet.compile(
loss = keras.losses.categorical_crossentropy,
optimizer = 'Adam',
metrics = ['Accuracy']
)
Lnet.fit(
x = X_train_processed,
y = Y_train_processed,
batch_size = 128,
epochs = 10,
)
score = Lnet.evaluate(
x = X_test_processed,
y = Y_test_processed
)
print(score[1])
Выход:
Epoch 1/10
469/469 [==============================] - 8s 18ms/step - loss: 0.3533 - accuracy: 0.0000e+00
Epoch 2/10
469/469 [==============================] - 8s 18ms/step - loss: 0.1013 - accuracy: 5.1667e-05
Epoch 3/10
469/469 [==============================] - 8s 18ms/step - loss: 0.0730 - accuracy: 2.3167e-04
Epoch 4/10
469/469 [==============================] - 10s 21ms/step - loss: 0.0582 - accuracy: 4.8833e-04
Epoch 5/10
469/469 [==============================] - 9s 19ms/step - loss: 0.0478 - accuracy: 9.3333e-04
Epoch 6/10
469/469 [==============================] - 11s 23ms/step - loss: 0.0405 - accuracy: 0.0019
Epoch 7/10
469/469 [==============================] - 12s 25ms/step - loss: 0.0371 - accuracy: 0.0026
Epoch 8/10
469/469 [==============================] - 11s 23ms/step - loss: 0.0301 - accuracy: 0.0057
Epoch 9/10
469/469 [==============================] - 12s 25ms/step - loss: 0.0280 - accuracy: 0.0065
Epoch 10/10
469/469 [==============================] - 11s 24ms/step - loss: 0.0260 - accuracy: 0.0085
313/313 [==============================] - 1s 3ms/step - loss: 0.0323 - accuracy: 0.0080
0.008030000142753124