Я пытаюсь создать векторную машину поддержки с GridSearchCV
с использованием VScode, код, как показано ниже:
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
parameters = {'C':(1,10,), 'kernel': ("rbf",),'gamma':(1,10,100,1000)}
svc = SVC(probability = True)
svc_cv = GridSearchCV(svc, param_grid = parameters, refit = True, n_jobs= -1)
svc_cv.fit(x_train, y_train)
Проблема в том, что когда я передаю n_jobs
, это не 1 (либо n_jobs = -1
или 2
или любое другое), ошибка возникает как:
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
in
----> 1 svc_cv.fit(x_train, y_train)
C:\Python38\lib\site-packages\sklearn\utils\validation.py in inner_f(*args, **kwargs)
71 FutureWarning)
72 kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
---> 73 return f(**kwargs)
74 return inner_f
75
C:\Python38\lib\site-packages\sklearn\model_selection\_search.py in fit(self, X, y, groups, **fit_params)
693 verbose=self.verbose)
694 results = {}
--> 695 with parallel:
696 all_candidate_params = []
697 all_out = []
C:\Python38\lib\site-packages\joblib\parallel.py in __enter__(self)
709 def __enter__(self):
710 self._managed_backend = True
--> 711 self._initialize_backend()
712 return self
713
C:\Python38\lib\site-packages\joblib\parallel.py in _initialize_backend(self)
719 """Build a process or thread pool and return the number of workers"""
720 try:
--> 721 n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
722 **self._backend_args)
723 if self.timeout is not None and not self._backend.supports_timeout:
C:\Python38\lib\site-packages\joblib\_parallel_backends.py in configure(self, n_jobs, parallel, prefer, require, idle_worker_timeout, **memmappingexecutor_args)
490 SequentialBackend(nesting_level=self.nesting_level))
491
--> 492 self._workers = get_memmapping_executor(
493 n_jobs, timeout=idle_worker_timeout,
494 env=self._prepare_worker_env(n_jobs=n_jobs),
C:\Python38\lib\site-packages\joblib\executor.py in get_memmapping_executor(n_jobs, **kwargs)
18
19 def get_memmapping_executor(n_jobs, **kwargs):
---> 20 return MemmappingExecutor.get_memmapping_executor(n_jobs, **kwargs)
21
22
C:\Python38\lib\site-packages\joblib\executor.py in get_memmapping_executor(cls, n_jobs, timeout, initializer, initargs, env, temp_folder, context_id, **backend_args)
40 _executor_args = executor_args
41
---> 42 manager = TemporaryResourcesManager(temp_folder)
43
44 # reducers access the temporary folder in which to store temporary
C:\Python38\lib\site-packages\joblib\_memmapping_reducer.py in __init__(self, temp_folder_root, context_id)
529 # exposes exposes too many low-level details.
530 context_id = uuid4().hex
--> 531 self.set_current_context(context_id)
532
533 def set_current_context(self, context_id):
C:\Python38\lib\site-packages\joblib\_memmapping_reducer.py in set_current_context(self, context_id)
533 def set_current_context(self, context_id):
534 self._current_context_id = context_id
--> 535 self.register_new_context(context_id)
536
537 def register_new_context(self, context_id):
C:\Python38\lib\site-packages\joblib\_memmapping_reducer.py in register_new_context(self, context_id)
558 new_folder_name, self._temp_folder_root
559 )
--> 560 self.register_folder_finalizer(new_folder_path, context_id)
561 self._cached_temp_folders[context_id] = new_folder_path
562
C:\Python38\lib\site-packages\joblib\_memmapping_reducer.py in register_folder_finalizer(self, pool_subfolder, context_id)
588 # semaphores and pipes
589 pool_module_name = whichmodule(delete_folder, 'delete_folder')
--> 590 resource_tracker.register(pool_subfolder, "folder")
591
592 def _cleanup():
C:\Python38\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in register(self, name, rtype)
189 '''Register a named resource, and increment its refcount.'''
190 self.ensure_running()
--> 191 self._send('REGISTER', name, rtype)
192
193 def unregister(self, name, rtype):
C:\Python38\lib\site-packages\joblib\externals\loky\backend\resource_tracker.py in _send(self, cmd, name, rtype)
202
203 def _send(self, cmd, name, rtype):
--> 204 msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
205 if len(name) > 512:
206 # posix guarantees that writes to a pipe of less than PIPE_BUF
UnicodeEncodeError: 'ascii' codec can't encode characters in position 18-19: ordinal not in range(128)
Однако я могу запустить его совершенно нормально с n_jobs = -1
, используя браузер jupyter notebook, и гадаю, что случилось.