import pandas as pd
import os as os
import numpy as np
import statsmodels.formula.api as sm
Сначала я создал dict для хранения 52 наборов данных (по одному на код штата 0–51)
# Partition the full frame into one sub-DataFrame per state code (0-51).
# `ccf` is the source DataFrame, defined earlier in the file.
d = {state: ccf.loc[ccf['state'] == state] for state in range(52)}
d.keys()
dict_keys([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51])
Для проверки
# Sanity check: preview the first rows of state 1's subset.
d[1].head()
Затем я запустил код в цикле, используя позицию в dict
# Fit one statsmodels logistic regression per state and store its summary.
# `names` (predictor columns) and `d` (per-state frames) are defined earlier.
# Fix: the original used range(1, 51), which covers states 1-50 only and
# silently skipped state 51 even though d holds keys 0-51.
results = {}
for x in range(1, 52):
    results[x] = sm.Logit(d[x].fraudRisk, d[x][names]).fit().summary2()
Однако я чувствовал, что должен использовать несколько классификаторов из sklearn. Сначала мне нужно разделить данные, как указано выше.
from sklearn.model_selection import train_test_split
# Multiple Models
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
#Model Metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
# One prediction dict per model family, keyed by state code.
# The generator expression yields a fresh, independent {} for each name.
lr, gnb, svc, rfc, classifier, regr_1, regr_2 = ({} for _ in range(7))
import datetime
datetime.datetime.now()  # wall-clock timestamp before the training loop starts
# Train five classifiers per state on a 70/30 split and print four metric
# reports for each. Predictions are kept in the module-level dicts
# (lr, gnb, svc, rfc, classifier) keyed by state, as before.
# Fixes vs. the original:
#   * range(1, 51) skipped state 51 (d holds keys 0-51) -> range(1, 52);
#   * the 20 copy-pasted print lines are collapsed into a metric loop,
#     preserving the exact report strings and print order.
for x in range(1, 52):
    X_train, X_test, y_train, y_test = train_test_split(
        d[x][names], d[x].fraudRisk, test_size=0.3)
    print(len(X_train))
    print(len(y_test))
    # Create classifiers: fit on the training split, predict the held-out set.
    lr[x] = LogisticRegression().fit(X_train, y_train).predict(X_test)
    gnb[x] = GaussianNB().fit(X_train, y_train).predict(X_test)
    svc[x] = LinearSVC(C=1.0).fit(X_train, y_train).predict(X_test)
    rfc[x] = RandomForestClassifier(n_estimators=1).fit(X_train, y_train).predict(X_test)
    classifier[x] = KNeighborsClassifier(n_neighbors=3).fit(X_train, y_train).predict(X_test)
    print(datetime.datetime.now())
    # Model name -> its predictions for this state (insertion order matches
    # the original print order).
    preds = {
        'LogisticRegression': lr[x],
        'GaussianNB': gnb[x],
        'LinearSVC': svc[x],
        'RandomForestClassifier': rfc[x],
        'KNeighborsClassifier': classifier[x],
    }
    # Report title -> metric callable; each metric is printed for every model.
    metrics = [
        ("Accuracy Score", accuracy_score),
        ("Classification Report", classification_report),
        ("Confusion Matrix Report", confusion_matrix),
        ("Area Under Curve", roc_auc_score),
    ]
    for title, metric in metrics:
        print(title + " for model for state ", x, 'is ')
        for model_name, y_pred in preds.items():
            print(model_name, metric(y_test, y_pred))
Потребовалось много времени для 5 моделей × 51 штата с несколькими метриками, но оно того стоило. Дайте мне знать, если есть более быстрый или лучший способ написать более элегантный и менее хакерский код.