I solved the problem: at first I had mixed up some dimensions, which I fixed. The real problem, however, was in the weight update. I was trying to update the weights like this:
self.W_hidden = w_hidden_old - learning_rate * self.W_hidden
but that is wrong, because the learning rate has to be multiplied by the gradient of the error with respect to the weights, not by the weight matrix itself. So the correct way is:
self.W_hidden = w_hidden_old - learning_rate * self.Partials_W_hidden
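In other words, each weight matrix has to take a step against its own error gradient. A minimal standalone sketch of that rule (the names W, dE_dW and so on are illustrative, not taken from the class below):

import numpy as np

# Hypothetical example of one gradient-descent step on a weight matrix.
W = np.random.randn(3, 3)          # current weights
dE_dW = np.random.randn(3, 3)      # gradient of the error w.r.t. W
learning_rate = 0.1

W_new = W - learning_rate * dE_dW  # correct: step along the negative gradient
# The wrong variant, W - learning_rate * W, only shrinks the weights
# toward zero and never uses the error signal at all.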
After this fix, the result and the loss curve look as expected.
The final code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


class Neural_Net:
    """
    1-3-3-1 feed-forward net: one input, two hidden layers of three sigmoid
    units each, and a linear output, trained with plain SGD to fit sin(x).
    """
    def __init__(self, activation_function, learning_rate, runs):
        # 'activation_function' selects the backpropagation branch for the
        # output layer ('linear' in this example)
        self.activation_function = activation_function
        # Leftover from an earlier experiment; the CSV data is never used below
        # (note: pandas expects decimal=',' as a string, not b','):
        # self.Data = pd.read_csv(r"U:\19_035_Machine_Learning_Workshop\2_Workshopinhalt\Weitere\Neural Networks\AirQualityUCI\AirQualityUCI.csv",
        #                         sep=';', decimal=',').iloc[:, :-2].dropna()
        self.X_train = np.linspace(0, 5, 1000)
        self.y_train = np.sin(self.X_train)
        plt.plot(self.X_train, self.y_train)
        self.y_pred = None
        self.W_input = np.random.randn(1, 3)
        self.Partials_W_input = np.random.randn(1, 3)
        self.W_hidden = np.random.randn(3, 3)
        self.Partials_W_hidden = np.random.randn(3, 3)
        self.W_output = np.random.randn(3, 1)
        self.Partials_W_output = np.random.randn(3, 1)
        self.Activations = np.zeros((3, 2))  # one column per hidden layer
        self.Partials = np.zeros((3, 2))     # dE/d(pre-activation), per layer
        self.Output_Gradient = None
        self.Loss = 0
        self.learning_rate = learning_rate
        self.runs = runs
        self.Losses = []
        self.i = 0

    def apply_activation_function(self, activation_vector):
        # Sigmoid
        return 1 / (1 + np.exp(-activation_vector))

    def forward_pass(self, training_instance):
        for layer in range(len(self.Activations[0])):
            if layer == 0:
                # First layer: between the input X and the first hidden layer
                pre_activation_first = self.W_input.T @ training_instance.reshape(1, 1)
                self.Activations[:, 0] = self.apply_activation_function(pre_activation_first).ravel()
            else:
                pre_activation_hidden = self.W_hidden.T @ self.Activations[:, layer - 1]
                self.Activations[:, layer] = self.apply_activation_function(pre_activation_hidden)
        output = self.W_output.T @ self.Activations[:, -1].reshape(-1, 1)
        return output

    def backpropagation(self, y_true, training_instance):
        if self.activation_function == 'sigmoid':
            pass
        if self.activation_function == 'linear':
            # Gradient of the squared error w.r.t. the linear output
            self.Output_Gradient = -(y_true - self.y_pred)
            # Partial gradients of the error w.r.t. the pre-activation values in the nodes
            self.Partials[:, 1] = ((self.Activations[:, 1] * (1 - self.Activations[:, 1])).reshape(-1, 1)
                                   * (self.W_output @ self.Output_Gradient)).ravel()
            self.Partials[:, 0] = self.Activations[:, 0] * (1 - self.Activations[:, 0]) * (self.W_hidden @ self.Partials[:, 1])
            # Gradients w.r.t. the weights
            self.Partials_W_output = self.Output_Gradient * self.Activations[:, -1]
            self.Partials_W_hidden = self.Partials[:, -1].reshape(3, 1) * self.Activations[:, 0].reshape(1, 3)
            self.Partials_W_input = (self.Partials[:, 0].reshape(3, 1) * training_instance.T).T

    def weight_update(self, training_instance, learning_rate):
        # Output layer weights
        w_output_old = self.W_output.copy()
        self.W_output = w_output_old - learning_rate * self.Partials_W_output.reshape(-1, 1)
        # Hidden layer weights
        w_hidden_old = self.W_hidden.copy()
        self.W_hidden = w_hidden_old - learning_rate * self.Partials_W_hidden
        # Input layer weights
        w_input_old = self.W_input.copy()
        self.W_input = w_input_old - learning_rate * self.Partials_W_input

    def train_model(self):
        for _ in range(self.runs):
            for instance in range(len(self.X_train)):
                # Forward pass
                self.y_pred = self.forward_pass(self.X_train[instance])
                # Calculate the loss (also records it in self.Losses)
                self.Loss = self.calc_loss(self.y_pred, self.y_train[instance])
                # Backpropagation
                self.backpropagation(self.y_train[instance], self.X_train[instance])
                # Update the weights
                self.weight_update(self.X_train[instance], self.learning_rate)
        # Uncomment to plot the recorded loss curve on its own figure:
        # plt.figure()
        # plt.plot(range(len(self.Losses)), self.Losses)
        # plt.show()
        # Plot predictions over the training interval
        predictions = []
        for i in np.linspace(0, 5, 1000):
            predictions.append(self.make_prediction(i)[0])
        plt.plot(np.linspace(0, 5, 1000), predictions)

    def make_prediction(self, X_new):
        return self.forward_pass(X_new)

    def calc_loss(self, y_pred, y_true):
        loss = (1 / 2) * (y_true - y_pred) ** 2
        self.Losses.append(loss[0])
        return loss

    def accuracy(self):
        pass


Neural_Net('linear', 0.1, 1500).train_model()
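To sanity-check the fit after training, it helps to keep a reference to the trained instance instead of discarding it, and compare its predictions against np.sin directly. A small sketch of that check (mean_abs_err is just an illustrative name, assuming the class above is in scope):

net = Neural_Net('linear', 0.1, 1500)
net.train_model()

xs = np.linspace(0, 5, 200)
preds = np.array([net.make_prediction(x).item() for x in xs])
mean_abs_err = np.mean(np.abs(preds - np.sin(xs)))
print('Mean absolute error on [0, 5]:', mean_abs_err)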
To improve the code further, we should add a bias term to the input of each hidden layer.
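A minimal sketch of what that could look like, factored out as standalone functions (hidden_layer_forward and bias_update are hypothetical helpers, not part of the class above; the key fact is that the gradient of the error with respect to a bias equals the layer's pre-activation partial):

import numpy as np

# Hypothetical sketch: one hidden-layer step with a bias term.
def hidden_layer_forward(W, b, a_prev):
    pre_activation = W.T @ a_prev + b          # bias shifts the pre-activation
    return 1 / (1 + np.exp(-pre_activation))   # sigmoid, as in the class above

def bias_update(b, partials, learning_rate):
    # dE/db = dE/d(pre-activation), so the bias update mirrors the weight update
    return b - learning_rate * partials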