I keep getting this error while creating the confusion matrix. My feature variables as well as the target variable are all label encoded, but I still don't know why it raises this error.
Error: C:\Users\Strat Com\PycharmProjects\IGN Review\venv\lib\site-packages\sklearn\metrics\classification.py:261: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
ValueError: At least one label specified must be in y_true
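As far as I can tell, the same pair of messages can be reproduced with a tiny standalone example when string labels are compared against an integer y_true; this is only a toy sketch with made-up data, not my dataset, and the exact behaviour may depend on the sklearn version shown in the traceback:
import numpy as np
from sklearn.metrics import confusion_matrix
toy_true = np.array([0, 1, 1, 0]) # integer-encoded target, like my Label column after LabelEncoder
toy_pred = np.array([0, 1, 0, 0])
confusion_matrix(toy_true, toy_pred, labels=['0', '1']) # string labels vs integer y_true -- this seems to hit the same FutureWarning and ValueError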
Note: the code explanation and the dataset are attached. I am using Windows 10 and running all of this code in a Jupyter notebook.
Dataset link
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
DataFrame=pd.read_csv("DataSet.txt",sep='\t',low_memory=False,skip_blank_lines=True) # Loading the data into the Data Frame
DataFrame=DataFrame.dropna(how='all')
half_count=len(DataFrame)/2
DataFrame=DataFrame.dropna(thresh=half_count,axis=1) # Dropping any column with more than 50% missing values
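# (thresh=half_count with axis=1 keeps only the columns that have at least half_count non-null values)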
FrameExplorer = pd.DataFrame(DataFrame.dtypes,columns=['dtypes'])
FrameExplorer=FrameExplorer.reset_index()
FrameExplorer=FrameExplorer.rename(columns={'index':'ColumnName'})
drop_list=['IDShop','PaymentDay','ShopRank','OtherCards','QuantBankAccounts','ApplicationBooth','InsuranceOption']
DataFrame=DataFrame.drop(drop_list,axis=1)
DataFrame = DataFrame.loc[:,DataFrame.apply(pd.Series.nunique) != 1] # Keeping only the columns with more than 1 unique value (dropping constant columns)
for cols in DataFrame.columns:
    if (len(DataFrame[cols].unique())<4):
        print(DataFrame[cols].value_counts()) # Showing the value counts of the low-cardinality columns
null_counts = DataFrame.isnull().sum()
print("Number of Null count in each column \n{}".format(null_counts))
# Here we remove any column in which more than 1% of the rows contain null values. Based on the counts above,
# "Sex" and "Reference2" will be dropped, as roughly 10% of their rows have missing values
DataFrame=DataFrame.drop(['Sex','Reference2'],axis=1)
DataFrame=DataFrame.dropna() # Dropping the remaining rows that contain missing values to make the data cleaner
DataFrame=DataFrame.drop('Reference1',axis=1)
# Now we label encode the object-dtype columns shown above, as they contain only "Y" and "N" values
FeatureEncoder=preprocessing.LabelEncoder()
DataFrame['MaritalStatus']=FeatureEncoder.fit_transform(DataFrame['MaritalStatus'])
DataFrame['ResidencialPhone']=FeatureEncoder.fit_transform(DataFrame['ResidencialPhone'])
DataFrame['ResidenceType']=FeatureEncoder.fit_transform(DataFrame['ResidenceType'])
DataFrame['MothersName']=FeatureEncoder.fit_transform(DataFrame['MothersName'])
DataFrame['FathersName']=FeatureEncoder.fit_transform(DataFrame['FathersName'])
DataFrame['WorkingTown']=FeatureEncoder.fit_transform(DataFrame['WorkingTown'])
DataFrame['WorkingState']=FeatureEncoder.fit_transform(DataFrame['WorkingState'])
DataFrame['PostalAddress']=FeatureEncoder.fit_transform(DataFrame['PostalAddress'])
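# Alternative sketch (commented out): reusing a single LabelEncoder overwrites its fitted classes_ on every call,
# so the earlier column mappings cannot be inverse_transform-ed later. Keeping one encoder per column does the
# same encoding but preserves each mapping; the column list is the same one encoded above.
# EncodersByColumn = {}
# for col in ['MaritalStatus','ResidencialPhone','ResidenceType','MothersName',
#             'FathersName','WorkingTown','WorkingState','PostalAddress']:
#     EncodersByColumn[col] = preprocessing.LabelEncoder()
#     DataFrame[col] = EncodersByColumn[col].fit_transform(DataFrame[col])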
# Now we split the data into a training set and a testing set to train the model and then test it
cols = [col for col in DataFrame.columns if col not in ['Label']] # Label is the Target Feature
FeatureData=DataFrame[cols] # Feature Variables
TargetData=DataFrame['Label'] # Target Variables
#split data set into train and test sets
FeatureData_Train, FeatureData_Test, TargetData_Train, TargetData_Test = train_test_split(FeatureData,TargetData, test_size = 0.30, random_state = 10)
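# Quick sketch to check the class balance after the split; if the Label classes are very imbalanced,
# train_test_split also accepts stratify=TargetData to keep the same class ratio in both sets
print(TargetData_Train.value_counts())
print(TargetData_Test.value_counts())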
print(type(FeatureData_Train)) # Confirming the types returned by train_test_split
print(type(TargetData_Train))
# Next we feed the split data created above to the model
neighbor=KNeighborsClassifier(n_neighbors=3) # Creating an Object of KNN Classifier
neighbor.fit(FeatureData_Train,TargetData_Train) # Training the model to classify
PredictionData=neighbor.predict(FeatureData_Test) # Predicting the Response
# evaluate accuracy
print ("KNeighbors accuracy score : ",accuracy_score(TargetData_Test, PredictionData))
from yellowbrick.classifier import ClassificationReport
from yellowbrick.classifier import ConfusionMatrix
# Instantiate the classification model and visualizer
visualizer = ClassificationReport(neighbor, classes=['0','1'])
visualizer.fit(FeatureData_Train,TargetData_Train) # Fit the training data to the visualizer
visualizer.score(FeatureData_Test,TargetData_Test) # Evaluate the model on the test data
g = visualizer.poof() # Draw/show/poof the data
cm = ConfusionMatrix(neighbor, classes=['0','1'])
cm.fit(FeatureData_Train,TargetData_Train)
cm.score(FeatureData_Test,TargetData_Test)
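cm.poof() # Draw the confusion matrix, using the same poof() call as for the classification report above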