I am trying to reproduce the loss plot from an example found here. The code provided does not show how to produce the plot from a class that is run in a loop. Here is the original code:
import numpy as np

def sigmoid(x):
    return 1.0/(1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1.0 - x)

class NeuralNetwork:
    def __init__(self, x, y):
        self.input = x
        self.weights1 = np.random.rand(self.input.shape[1], 4)
        self.weights2 = np.random.rand(4, 1)
        self.y = y
        self.output = np.zeros(self.y.shape)

    def feedforward(self):
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.output = sigmoid(np.dot(self.layer1, self.weights2))
        print(sum((self.y - self.output)**2))

    def backprop(self):
        # application of the chain rule to find the derivative of the loss function with respect to weights2 and weights1
        d_weights2 = np.dot(self.layer1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))
        d_weights1 = np.dot(self.input.T, (np.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))
        # update the weights with the derivative (slope) of the loss function
        self.weights1 += d_weights1
        self.weights2 += d_weights2

if __name__ == "__main__":
    X = np.array([[0,0,1],
                  [0,1,1],
                  [1,0,1],
                  [1,1,1]])
    y = np.array([[0],[1],[1],[0]])
    nn = NeuralNetwork(X, y)

    for i in range(1500):
        nn.feedforward()
        nn.backprop()

    print(nn.output)
# Output:
# [1.38353847]
# [1.11068749]
# [1.00565082]
# [1.00286219]
# [1.0027016]
# [1.00254482]
# [1.00239119]
# [1.00224046]
# [1.00209239]
# [1.00194675]
# [1.0018033]
# [1.00166184]
# [1.00152213]
# [1.00138396]
# [1.00124712]
# ....1500 times to converge
Here is the plot I am trying to reproduce:

What do I need to do with the code below, where the loss-tracking lines were added in feedforward(), so that I can plot using a global variable loss?
import numpy as np

def sigmoid(x):
    return 1.0/(1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1.0 - x)

class NeuralNetwork:
    def __init__(self, x, y):
        self.input = x
        self.weights1 = np.random.rand(self.input.shape[1], 4)
        self.weights2 = np.random.rand(4, 1)
        self.y = y
        self.output = np.zeros(self.y.shape)
        loss = []

    def feedforward(self):
        global loss
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.output = sigmoid(np.dot(self.layer1, self.weights2))
        loss.append(sum((self.y - self.output)**2))
        print("loss is {}".format(loss))
        # print(sum((self.y - self.output)**2))
        # print(self.output)

    def backprop(self):
        # application of the chain rule to find the derivative of the loss function with respect to weights2 and weights1
        d_weights2 = np.dot(self.layer1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))
        d_weights1 = np.dot(self.input.T, (np.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))
        # update the weights with the derivative (slope) of the loss function
        self.weights1 += d_weights1
        self.weights2 += d_weights2

if __name__ == "__main__":
    X = np.array([[0,0,1],
                  [0,1,1],
                  [1,0,1],
                  [1,1,1]])
    y = np.array([[0],[1],[1],[0]])
    nn = NeuralNetwork(X, y)

    for i in range(1500):
        nn.feedforward()
        nn.backprop()

    print(nn.output)
# TypeError Traceback (most recent call last)
# <ipython-input-22-78313d1742df> in <module>()
# 46
# 47 for i in range(1500):
# ---> 48 nn.feedforward()
# 49 nn.backprop()
# 50
# TypeError: feedforward() missing 1 required positional argument: 'loss'
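As it stands, the loss tracking above cannot work: loss = [] inside __init__ creates a local variable that is discarded when the constructor returns, and global loss in feedforward() refers to a module-level name that is never created, so the code as shown would raise a NameError (the TypeError in the traceback suggests the version actually run declared feedforward(self, loss)). Below is a minimal sketch of one way to get the plot, assuming matplotlib is available; the attribute name self.loss and the axis labels are my own choices, not from the original example. The idea is to keep the history on the instance, append to it in feedforward(), and plot once after the training loop.

import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x):
    return 1.0/(1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1.0 - x)

class NeuralNetwork:
    def __init__(self, x, y):
        self.input = x
        self.weights1 = np.random.rand(self.input.shape[1], 4)
        self.weights2 = np.random.rand(4, 1)
        self.y = y
        self.output = np.zeros(self.y.shape)
        self.loss = []  # loss history kept on the instance (hypothetical name), not a global

    def feedforward(self):
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.output = sigmoid(np.dot(self.layer1, self.weights2))
        # record the scalar sum of squared errors for this iteration
        self.loss.append(np.sum((self.y - self.output)**2))

    def backprop(self):
        # same chain-rule updates as in the original code
        d_weights2 = np.dot(self.layer1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))
        d_weights1 = np.dot(self.input.T, (np.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))
        self.weights1 += d_weights1
        self.weights2 += d_weights2

if __name__ == "__main__":
    X = np.array([[0,0,1],
                  [0,1,1],
                  [1,0,1],
                  [1,1,1]])
    y = np.array([[0],[1],[1],[0]])
    nn = NeuralNetwork(X, y)

    for i in range(1500):
        nn.feedforward()
        nn.backprop()

    # plot the recorded loss curve over all iterations
    plt.plot(nn.loss)
    plt.xlabel("iteration")
    plt.ylabel("sum of squared errors")
    plt.show()

Keeping the history on the instance avoids globals entirely and leaves feedforward()'s signature unchanged, so the training loop runs exactly as before.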