Logistic regression cost = nan
0 votes
/ January 22, 2020

I am trying to implement a logistic regression model, but I keep getting 'nan' values as the cost. I have tried it with several datasets, and it gives the same result. Different sources give slightly different implementations of gradient descent, so I am not sure whether the gradient implementation here is correct. Here is the full code:

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split

class LogisticRegression:
    def __init__(self, lr=0.001, n_iter=8000):
        self.lr = lr
        self.n_iter = n_iter
        self.weights = None

    """
    z is dot product of features and weights, which is then mapped to discrete values, such as between 0 and 1
    """
    def sigmoid(self, z):
        return 1.0/(1+np.exp(-z))

    def predict(self, x_features, weights):
        """Returns 1d array of probabilities that the class label == 1"""
        z = np.dot(x_features, weights)
        return self.sigmoid(z)

    def cost(self, x_features, labels, weights):
        """
        Using Mean Absolute Error

        Cost = (labels*log(predictions) + (1-labels)*log(1-predictions) ) / len(labels) 
        """
        observation = len(labels)
        predictions = self.predict(x_features, weights)
        #take the error when label = 1
        class1_cost = -labels*np.log(predictions)
        #take the error when label = 0 (note the leading minus; without it the two terms do not sum to the log loss)
        class2_cost = -(1-labels)*np.log(1-predictions)
        #take the sum of both costs
        cost = class1_cost+class2_cost
        #take the average cost
        cost = cost.sum()/observation
        return cost

    def update_weight(self, x_features, labels, weights):
        """
        Vectorized Gradient Descent
        """
        N = len(x_features)
        #get predictions (approximation of y)
        predictions = self.predict(x_features, weights)
        gradient = np.dot(x_features.T, predictions-labels)
        #average the gradient over all N samples
        gradient /= N
        #multiply gradients by our learning rate
        gradient *= self.lr
        #subtract from our weights to minimize cost
        weights -= gradient
        return weights

    def give_predictions(self, x_features, weights):
        probabilities = self.predict(x_features, weights)
        #threshold the predicted probabilities at 0.5 to get class labels
        y_predicted_cls = [1 if i > 0.5 else 0 for i in probabilities]
        return y_predicted_cls

    def train(self, features, labels):
        n_samples, n_features = features.shape
        self.weights = np.zeros((n_features,1)) #initialize the weights as a column vector
        cost_history = []
        for i in range(self.n_iter):
            self.weights = self.update_weight(features, labels, self.weights)
            #calculate error for auditing purposes
            cost = self.cost(features, labels, self.weights)
            cost_history.append(cost)
            #Log process
            if i % 1000 == 0:
                print("iter: {}, cost: {}".format(i, cost))

        return self.weights, cost_history

def generate_data():
    bc = datasets.load_breast_cancer()
    x_features, labels = bc.data, bc.target

    x_train, x_test, y_train, y_test = train_test_split(x_features, labels, test_size=0.2, random_state=1234)
    return x_train, x_test, y_train, y_test

x_train, x_test, y_train, y_test = generate_data()

model = LogisticRegression()
model.train(x_train, y_train)
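
To check that the gradient formula itself is the standard one for the cross-entropy loss, here is a small self-contained finite-difference sanity check (the helper names and toy data below are only for this sketch, not part of the model above):

import numpy as np

# Compare the analytic gradient (1/N) * X.T @ (sigmoid(X @ w) - y)
# against a central finite-difference approximation of the cost.
def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def _cost(X, y, w):
    p = _sigmoid(X @ w)
    return -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))

def _grad(X, y, w):
    return X.T @ (_sigmoid(X @ w) - y) / len(X)

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 3))
y = rng.integers(0, 2, size=20).astype(float)
w = rng.normal(size=3)

eps = 1e-6
numeric = np.array([
    (_cost(X, y, w + eps * e) - _cost(X, y, w - eps * e)) / (2 * eps)
    for e in np.eye(3)
])
print(np.allclose(numeric, _grad(X, y, w)))  # expect True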

Answers [ 2 ]

0 votes
/ January 23, 2020

I had to apply feature scaling to x_train before training the model. I used sklearn's StandardScaler:

from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
x_train = sc_X.fit_transform(x_train)
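
For completeness, a sketch of how this slots into the script from the question (assuming the same variable names): fit the scaler on the training split only and reuse it to transform x_test, so both splits are standardized with the same statistics.

from sklearn.preprocessing import StandardScaler

sc_X = StandardScaler()
x_train = sc_X.fit_transform(x_train)  # fit the mean/variance on training data only
x_test = sc_X.transform(x_test)        # reuse the same statistics on the test split

model = LogisticRegression()
model.train(x_train, y_train)

With the features standardized, X·w stays in a range where the sigmoid does not saturate to exactly 0 or 1, so np.log no longer returns -inf and the cost stops coming out as nan.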
0 votes
/ January 22, 2020

Your cost function seems correct, but you need to have 'y' as a vector of zeros and ones (one_hot_encoding).
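
If that refers to the array's shape as well, a minimal sketch (assuming the variable names from the question): the weights are initialized as an (n_features, 1) column, so the predictions come out as (n_samples, 1); reshaping y_train to match keeps predictions - labels element-wise instead of broadcasting to an (n_samples, n_samples) matrix.

# y_train from load_breast_cancer has shape (n_samples,); reshape it into a
# column vector so it lines up with the (n_samples, 1) predictions.
y_train = y_train.reshape(-1, 1)

model = LogisticRegression()
model.train(x_train, y_train)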

...