Я продолжаю сталкиваться с этой ошибкой, я прочитал некоторые случаи с такой же ошибкой, и я пытался преобразовать каждый список в массив numpy, но он все еще не работает.
Что именно делает эта ошибка означает?
TypeError Traceback (most recent call last)
TypeError: only size-1 arrays can be converted to Python scalars
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
<ipython-input-4-b5b54f6e1f9e> in <module>
69
70 model = LogisticRegression(eta = 0.01, n_iterations = 1000, lamb = 100)
---> 71 model.fit(x_train, y_train)
72 ypre = model.predict(x_test)
73 print(ypre)
<ipython-input-4-b5b54f6e1f9e> in fit(self, x, y)
29 t = (hx - y)
30
---> 31 s = self.cal_s(t, x, row, column)
32 gradient_w = np.sum(s, 0) / row * self.eta
33 gradient_b = np.sum(t, 0) / row * self.eta
<ipython-input-4-b5b54f6e1f9e> in cal_s(self, t, x, row, colum)
17 for i in range(0,row):
18 for j in range(0, colum):
---> 19 s[i][j] = t[i] * x[i][j]
20 return s
21
ValueError: setting an array element with a sequence.
Где я go ошибся? Как я могу решить это?
Функция работает, прежде чем я импортирую какой-нибудь CSV-файл. (Я использовал np.random.rand, чтобы обобщить его раньше)
Вот мой код:
import numpy as np
class LogisticRegression:
def __init__(self, eta, n_iterations, lamb):
self.w = np.zeros(30) #theta
self.b = 0 #theta 0
self.eta = eta #Learing rate
self.n_iterations = n_iterations #times for iterations
self.lamb = lamb #
self.r = (1 - lamb * self.eta / np.size(self.w, 0))
def logistic(self, x):
return 1.0/(1 + np.exp(-x))
def cal_s(self, t, x, row, colum):
s = np.zeros([row, colum], dtype=float)
for i in range(0,row):
for j in range(0, colum):
s[i][j] = t[i] * x[i][j]
return s
def fit(self, x, y):
itr = 0
row, column = np.shape(x)
print('number of instance', row)
while itr <= self.n_iterations:
fx = np.dot(self.w, x.T)
hx = self.logistic(fx)
t = (hx - y)
s = self.cal_s(t, x, row, column)
gradient_w = np.sum(s, 0) / row * self.eta
gradient_b = np.sum(t, 0) / row * self.eta
self.w = self.w * self.r - gradient_w
self.b -= gradient_b
itr += 1
def predict(self, x_test):
ypre = np.dot(self.w, x_test.T) + self.b
temp = ypre >= 0
yp = temp.astype(int)
return yp
if __name__ == '__main__':
import matplotlib.pyplot as plt
import csv
with open('X_train.csv', newline='') as xTrain:
xtrain = csv.reader(xTrain, delimiter=',')
x_train = list(xtrain)
x_train = np.array(x_train)
x_train = x_train.astype(np.float)
with open('Y_train.csv', newline='') as yTrain:
ytrain = csv.reader(yTrain, delimiter=',')
y_train = list(ytrain)
y_train = np.array(y_train)
y_train = y_train.astype(np.float)
with open('x_test.csv', newline='') as xTest:
xtest = csv.reader(xTest, delimiter=',')
x_test = list(xtest)
x_test = np.array(x_test)
x_test = x_test.astype(np.float)
model = LogisticRegression(eta = 0.01, n_iterations = 1000, lamb = 100)
model.fit(x_train, y_train)
ypre = model.predict(x_test)
print(ypre)