Why can't I compute average precision using make_scorer?
0 votes
/ April 28, 2020

I am trying to compute a cross-validated precision score for my multiclass classification model. To do this, I used make_scorer to set the averaging to 'weighted'.

# Build a cross-validated precision scorer with weighted averaging
from sklearn.metrics import make_scorer
from sklearn.metrics import average_precision_score

scorer = make_scorer(average_precision_score, average='weighted')
scorer

However, when I run the code, I get this error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-170-b2ce1e8a452e> in <module>
      2 from sklearn.metrics import make_scorer
      3 scorer = make_scorer(average_precision_score, average = 'weighted')
----> 4 cv_precision = cross_val_score(clf, X, y, cv=5, scoring=scorer)
      5 cv_precision = np.mean(cv_prevision)
      6 cv_precision

~\anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, error_score)
    388                                 fit_params=fit_params,
    389                                 pre_dispatch=pre_dispatch,
--> 390                                 error_score=error_score)
    391     return cv_results['test_score']
    392 

~\anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
    234             return_times=True, return_estimator=return_estimator,
    235             error_score=error_score)
--> 236         for train, test in cv.split(X, y, groups))
    237 
    238     zipped_scores = list(zip(*scores))

~\anaconda3\lib\site-packages\joblib\parallel.py in __call__(self, iterable)
   1002             # remaining jobs.
   1003             self._iterating = False
-> 1004             if self.dispatch_one_batch(iterator):
   1005                 self._iterating = self._original_iterator is not None
   1006 

~\anaconda3\lib\site-packages\joblib\parallel.py in dispatch_one_batch(self, iterator)
    833                 return False
    834             else:
--> 835                 self._dispatch(tasks)
    836                 return True
    837 

~\anaconda3\lib\site-packages\joblib\parallel.py in _dispatch(self, batch)
    752         with self._lock:
    753             job_idx = len(self._jobs)
--> 754             job = self._backend.apply_async(batch, callback=cb)
    755             # A job can complete so quickly than its callback is
    756             # called before we get here, causing self._jobs to

~\anaconda3\lib\site-packages\joblib\_parallel_backends.py in apply_async(self, func, callback)
    207     def apply_async(self, func, callback=None):
    208         """Schedule a func to be run"""
--> 209         result = ImmediateResult(func)
    210         if callback:
    211             callback(result)

~\anaconda3\lib\site-packages\joblib\_parallel_backends.py in __init__(self, batch)
    588         # Don't delay the application, to avoid keeping the input
    589         # arguments in memory
--> 590         self.results = batch()
    591 
    592     def get(self):

~\anaconda3\lib\site-packages\joblib\parallel.py in __call__(self)
    254         with parallel_backend(self._backend, n_jobs=self._n_jobs):
    255             return [func(*args, **kwargs)
--> 256                     for func, args, kwargs in self.items]
    257 
    258     def __len__(self):

~\anaconda3\lib\site-packages\joblib\parallel.py in <listcomp>(.0)
    254         with parallel_backend(self._backend, n_jobs=self._n_jobs):
    255             return [func(*args, **kwargs)
--> 256                     for func, args, kwargs in self.items]
    257 
    258     def __len__(self):

~\anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, error_score)
    542     else:
    543         fit_time = time.time() - start_time
--> 544         test_scores = _score(estimator, X_test, y_test, scorer)
    545         score_time = time.time() - start_time - fit_time
    546         if return_train_score:

~\anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in _score(estimator, X_test, y_test, scorer)
    589         scores = scorer(estimator, X_test)
    590     else:
--> 591         scores = scorer(estimator, X_test, y_test)
    592 
    593     error_msg = ("scoring must return a number, got %s (%s) "

~\anaconda3\lib\site-packages\sklearn\metrics\_scorer.py in __call__(self, estimator, *args, **kwargs)
     85             if isinstance(scorer, _BaseScorer):
     86                 score = scorer._score(cached_call, estimator,
---> 87                                       *args, **kwargs)
     88             else:
     89                 score = scorer(estimator, *args, **kwargs)

~\anaconda3\lib\site-packages\sklearn\metrics\_scorer.py in _score(self, method_caller, estimator, X, y_true, sample_weight)
    210         else:
    211             return self._sign * self._score_func(y_true, y_pred,
--> 212                                                  **self._kwargs)
    213 
    214 

~\anaconda3\lib\site-packages\sklearn\metrics\_ranking.py in average_precision_score(y_true, y_score, average, pos_label, sample_weight)
    213                                 pos_label=pos_label)
    214     return _average_binary_score(average_precision, y_true, y_score,
--> 215                                  average, sample_weight=sample_weight)
    216 
    217 

~\anaconda3\lib\site-packages\sklearn\metrics\_base.py in _average_binary_score(binary_metric, y_true, y_score, average, sample_weight)
     72     y_type = type_of_target(y_true)
     73     if y_type not in ("binary", "multilabel-indicator"):
---> 74         raise ValueError("{0} format is not supported".format(y_type))
     75 
     76     if y_type == "binary":

ValueError: multiclass format is not supported

It also doesn't work when I type in the cross-validation ...

import numpy as np
from sklearn.metrics import make_scorer, average_precision_score
from sklearn.model_selection import cross_val_score

scorer = make_scorer(average_precision_score, average='weighted')
cv_precision = cross_val_score(clf, X, y, cv=5, scoring=scorer)
cv_precision = np.mean(cv_precision)
cv_precision

I get the same error.
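Looking at the bottom of the traceback, the check that raises is a target-type test: average_precision_score only supports binary or multilabel-indicator targets, and an integer-encoded multiclass y is neither. This can be confirmed with the same utility scikit-learn uses internally (the toy labels below are just an illustration, not the actual data):

from sklearn.utils.multiclass import type_of_target

# An integer-encoded multiclass target is reported as 'multiclass',
# which is exactly the format _average_binary_score rejects above
print(type_of_target([0, 1, 2, 1, 0]))  # prints: multiclass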

1 Answer

0 votes
/ April 29, 2020

There are some limitations on using average_precision_score with multiclass classification: it only supports binary and multilabel-indicator targets. As a workaround, you can use OneVsRestClassifier, as documented here, together with label_binarize, as shown below:

from sklearn.metrics import make_scorer
from sklearn.metrics import average_precision_score
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn import preprocessing
from sklearn.datasets import make_classification

scorer = make_scorer(average_precision_score, average='weighted')

X, y = make_classification(n_features=10, random_state=0, n_classes=3, n_samples=1000, n_informative=8)
# Binarize the multiclass target into a multilabel-indicator matrix,
# which average_precision_score does support
y = preprocessing.label_binarize(y, classes=[0, 1, 2])
# Fit one binary classifier per class
clf = OneVsRestClassifier(linear_model.LogisticRegression())

cross_val_score(clf, X, y, cv=3, scoring=scorer)
array([0.586501  , 0.54517146, 0.596331  ])
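
One caveat worth noting: by default make_scorer passes the hard predictions from predict to the metric, while average_precision_score is defined over continuous scores. Scikit-learn's own 'average_precision' scorer is built with needs_threshold=True, so that decision_function (or predict_proba) output is used instead. A minimal sketch, assuming the same clf, X and y as above (proba_scorer is just an illustrative name):

# needs_threshold=True makes the scorer feed continuous scores
# (decision_function/predict_proba output) to average_precision_score
proba_scorer = make_scorer(average_precision_score,
                           average='weighted',
                           needs_threshold=True)

cross_val_score(clf, X, y, cv=3, scoring=proba_scorer)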

Hope this helps!
