Я пытаюсь обучить свою модель условной GAN и получаю следующую ошибку:
InvalidArgumentError: индексы [1,0] = 4 не находятся в [0, 4) [[node model_4 / embedding_3 / embedding_lookup (определено в: 3)]] [Op: __ inference_predict_function_3463] Ошибки могли быть вызваны операцией ввода. Операции с источником ввода, подключенные к узлу model_4 / embedding_3 / embedding_lookup: model_4 / embedding_3 / embedding_lookup / 3351 (определено в /usr/lib/python3.6/contextlib.py:81) Стек вызовов функций: pred_function
Любое предлагаемое решение?
from zipfile import ZipFile
with ZipFile('particles.zip', 'r') as myzip:
myzip.extractall()
print('done')
import tensorflow as tf
import os
import cv2
import numpy as np
from tensorflow.keras.layers import Dense,Reshape,Dropout,LeakyReLU,Flatten,BatchNormalization,Conv2D,Conv2DTranspose,Embedding,Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import imread
cs_image = imread('/content/particles/CS/tem_particle_CS_6_356.png', cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(cs_image, (28,28))
plt.imshow(resized_array)
categories = ['CS', 'MC', 'MCS', 'SS']
data_directory = '/content/particles'
training_data = []
def create_training_data():
for category in categories:
path = os.path.join(data_directory, category)
class_num = categories.index(category)
for img in os.listdir(path):
try:
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_array = cv2.resize(img_array, (40,40))
training_data.append([resized_array, class_num])
except Exception as e:
pass
create_training_data()
print(len(training_data))
import random
random.shuffle(training_data)
x_train = []
y_train = []
for features, label in training_data:
x_train.append(features)
y_train.append(label)
x_train = np.array(x_train).reshape(-1, 40, 40, 1)
y_train = np.array(y_train).reshape(-1)
x_train = x_train/255
x_train = x_train.reshape(-1, 40, 40, 1) * 2. - 1.
print(x_train.shape, y_train.shape)
#discriminator
input_label = Input(shape=(1,))
emb = Embedding(4, 20)(input_label)
upsample = Dense(40*40)(emb)
upsample = Reshape((40,40,1))(upsample)
input_image = Input(shape=(40,40,1))
concat = Concatenate()([input_image, upsample])
h1 = Conv2D(128, kernel_size = (3,3), strides=2, padding='same')(concat)
h1 = LeakyReLU(alpha=0.2)(h1)
h2 = Conv2D(128, kernel_size = (3,3), strides=2, padding='same')(h1)
h2 = LeakyReLU(alpha=0.2)(h2)
h3 = Conv2D(128, kernel_size = (3,3), strides=2, padding='same')(h2)
h3 = LeakyReLU(alpha=0.2)(h3)
flat = Flatten()(h3)
drop = Dropout(0.4)(flat)
fc = Dense(64, activation='relu')(drop)
output = Dense(1, activation='sigmoid')(fc)
discriminator = Model(inputs=[input_image, input_label], outputs=output)
discriminator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
from tensorflow.keras.utils import plot_model
plot_model(discriminator, to_file='discriminator_model.png')
#generator
input_label = Input(shape=(1,))
emb = Embedding(4, 20)(input_label)
upsample = Dense(10*10)(emb)
upsample = Reshape((10,10,1))(upsample)
latent_input = Input(shape=(100,))
rf = Dense(10*10*128)(latent_input)
rf = LeakyReLU(alpha=0.2)(rf)
rf = Reshape((10, 10, 128))(rf)
concat = Concatenate()([rf, upsample])
convt = Conv2DTranspose(128, kernel_size=(3, 3), strides=2, padding='same')(concat)
convt = LeakyReLU(0.2)(convt)
convt = Conv2DTranspose(128, kernel_size=(3, 3), strides=2, padding='same')(convt)
convt = LeakyReLU(0.2)(convt)
output = Conv2D(1, kernel_size=(10,10), activation='tanh', padding="same")(convt)
generator = Model(inputs=[latent_input, input_label], outputs=output)
discriminator.trainable = False
latent_input, input_label = generator.input
generator_output = generator.output
gan_output = discriminator([generator_output, input_label])
gan = Model(inputs=[latent_input, input_label], outputs= gan_output)
gan.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
def real_samples(x_train, batch_size):
random_samples = np.random.randint(0, x_train.shape[0], batch_size)
x, labels = x_train[random_samples], y_train[random_samples]
y = np.ones((batch_size, 1))
return [x, labels], y
def generate_latent_points(coding_size, batch_size, classes=10):
x = np.random.randn(batch_size*coding_size)
z = x.reshape(batch_size, coding_size)
labels = np.random.randint(0, classes, batch_size)
return [z, labels]
def fake_samples(generator, coding_size, batch_size):
z, input_label = generate_latent_points(coding_size, batch_size)
x = generator.predict([z, input_label])
y = np.zeros((batch_size, 1))
return [x, input_label], y
epochs = 300
coding_size = 100
batch_size = 32
epoch_batch = int(x_train.shape[0] / batch_size)
for i in range(epochs):
for j in range(epoch_batch):
[x_real, label_real], y_real = real_samples(x_train, batch_size)
d_loss1, _ = discriminator.train_on_batch([x_real, label_real], y_real)
[x_fake, labels], y_fake = fake_samples(generator, coding_size, batch_size)
d_loss2, _ = discriminator.train_on_batch([x_fake, labels], y_fake)
[z, labels_input] = generate_latent_points(latent_dim, batch_size)
y_gan = ones((batch_size, 1))
g_loss, _ = gan.train_on_batch([z, labels_input], y_gan)
print(f'Epoch: {i+1} -- d_loss1: {d_loss1} -- d_loss1: {d_loss2} -- g_loss: {g_loss}')