Linear regression getting NaN for loss
0 votes
/ 28 May 2018

I can't figure out why this Keras linear regression model isn't working. I'm using the airfoil self-noise dataset (loaded below) and the loss comes out as nan.

import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split

path = '/Users/admin/Desktop/airfoil_self_noise.csv'
df = pd.read_csv(path, sep='\t', header=None)

y = df[5]              # target (last column)
df2 = df.iloc[:, :-1]  # features (first five columns)

X_train, X_test, y_train, y_test = train_test_split(df2, y, test_size=0.2)

p = Sequential()

p.add(Dense(units=20, activation='relu', input_dim=5))
p.add(Dense(units=20, activation='relu'))
p.add(Dense(units=1))

p.compile(loss='mean_squared_error',
          optimizer='sgd')

p.fit(X_train, y_train, epochs=10, batch_size=32)

which results in:

Epoch 1/10
1202/1202 [==============================] - 0s 172us/step - loss: nan
Epoch 2/10
1202/1202 [==============================] - 0s 37us/step - loss: nan
Epoch 3/10
1202/1202 [==============================] - 0s 38us/step - loss: nan
Epoch 4/10
1202/1202 [==============================] - 0s 36us/step - loss: nan
Epoch 5/10
1202/1202 [==============================] - 0s 36us/step - loss: nan
Epoch 6/10
1202/1202 [==============================] - 0s 40us/step - loss: nan

1 Answer

0 votes
/ 28 May 2018

Just to get you started, building on top of "NaN loss when training regression network":

import pandas as pd
import keras

from keras.layers import Dense
from keras.models import Sequential

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()

# Reusing the path from your example
path = '/Users/admin/Desktop/airfoil_self_noise.csv'
df = pd.read_csv(path, sep='\t', header=None)

y = df[5]
df2 = df.iloc[:, :-1]

# Preprocessing: convert to NumPy arrays and standardize the features
X_train, X_test, y_train, y_test = train_test_split(df2.values, y.values, test_size=0.2)
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)  # reuse the scaler fitted on the training data

p = Sequential()

p.add(Dense(units=20, activation='relu', input_dim=5))
p.add(Dense(units=20, activation='relu'))
p.add(Dense(units=1))

p.compile(loss='mean_squared_error', optimizer='adam')

p.fit(X_train, y_train, epochs=100, batch_size=64)
...
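The key point is that the raw airfoil columns span very different numeric ranges, so plain SGD on the unscaled inputs produces exploding updates and the loss turns into nan; standardizing the features (and switching to Adam) keeps training stable. As a minimal follow-up sketch, not part of the original answer, you could then check the result on the held-out test set with Keras' standard evaluate/predict calls:

# Continuing from the variables defined in the snippet above
test_mse = p.evaluate(X_test, y_test, verbose=0)
print('Test MSE:', test_mse)

# Compare a few predictions against the true targets
preds = p.predict(X_test[:5]).ravel()
print('Predicted:', preds)
print('Actual:   ', y_test[:5])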