
FastAI v1 PyTorch Custom Model

0 votes
/ 02 May 2019

I have been trying to use fastai with a custom torch model. My code is as follows:

import numpy as np

# Load the pre-saved training and validation arrays
X_train = np.load(dirpath + 'X_train.npy')
X_valid = np.load(dirpath + 'X_valid.npy')
Y_train = np.load(dirpath + 'Y_train.npy')
Y_valid = np.load(dirpath + 'Y_valid.npy')

X_train's shape is (240, 122, 96), and Y_train's shape is (240, 1).

Then I convert them to torch tensors:

import torch

# Converting numpy arrays to torch tensors
def to_torch_data(x, np_type, tch_type):
    return torch.from_numpy(x.astype(np_type)).to(tch_type)

X_train = to_torch_data(X_train, float, torch.float32)
X_valid = to_torch_data(X_valid, float, torch.float32)
Y_train = to_torch_data(Y_train, float, torch.float32)
Y_valid = to_torch_data(Y_valid, float, torch.float32)
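
A quick check of the converted tensors (a small sanity-check addition, not part of the original code) confirms the dtype and shapes used below:

print(X_train.dtype, X_train.shape)  # torch.float32 torch.Size([240, 122, 96])
print(Y_train.dtype, Y_train.shape)  # torch.float32 torch.Size([240, 1])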

Then I create TensorDatasets so the data can be wrapped in a fastai DataBunch:

import torch.utils.data as tdatautils
from fastai.basic_data import DataBunch

# Creating torch TensorDatasets so the data can be used
# with fastai's DataBunch
train_ds = tdatautils.TensorDataset(X_train, Y_train)
valid_ds = tdatautils.TensorDataset(X_valid, Y_valid)

# Creating the DataBunch object to be used as `data` in fastai methods
batch_size = 24
my_data_bunch = DataBunch.create(train_ds, valid_ds, bs=batch_size)
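
One batch can be pulled from the DataBunch to verify that the wrapping preserved the tensor shapes (an optional sanity check, not shown in the original post):

# Optional: fetch one batch and verify its shapes
xb, yb = next(iter(my_data_bunch.train_dl))
print(xb.shape, yb.shape)  # expected: torch.Size([24, 122, 96]) torch.Size([24, 1])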

And this is my torch model:

# Creating the corresponding torch model
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self, droprate=0, activationF=None):
        super(Net, self).__init__()
        self.lstm_0 = nn.LSTM(96, 720)
        self.activation_0 = nn.ELU()
        self.dropout_0 = nn.Dropout(p=droprate)
        self.lstm_1 = nn.LSTM(720, 480)
        self.activation_1 = nn.ELU()
        self.batch_norm_1 = nn.BatchNorm1d(122)
        self.fc_2 = nn.Linear(480, 128)
        self.dropout_2 = nn.Dropout(p=droprate)
        self.last = nn.Linear(128, 1)
        self.last_act = nn.ReLU()

    def forward(self, x):
        out, hid1 = self.lstm_0(x)
        out = self.dropout_0(self.activation_0(out))
        out, hid2 = self.lstm_1(out)
        out = out[:, -1, :]  # keep only the last time step: batch_size x 480
        out = self.batch_norm_1(self.activation_1(out))
        out = self.dropout_2(self.fc_2(out))
        out = self.last_act(self.last(out))
        return out

# Create an instance of the model
net = Net(droprate=train_droprate, activationF=train_activation)  # .cuda()
print(net)
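
The my_learner object that appears in the traceback below is not shown in the post; it was presumably constructed along these lines. This is only a sketch: the loss function (MSELoss, since the target is a single continuous value) is an assumption.

from fastai.basic_train import Learner

# Sketch of the missing Learner construction; the loss function is assumed
my_learner = Learner(my_data_bunch, net, loss_func=nn.MSELoss())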

After all that, I run lr_find on the learner, and I get this error:

Empty                                     Traceback (most recent call last)
C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _try_get_batch(self, timeout)
    510         try:
--> 511             data = self.data_queue.get(timeout=timeout)
    512             return (True, data)

C:\Anaconda3\envs\fastai\lib\queue.py in get(self, block, timeout)
    171                     if remaining <= 0.0:
--> 172                         raise Empty
    173                     self.not_empty.wait(remaining)

Empty: 

During handling of the above exception, another exception occurred:

RuntimeError                              Traceback (most recent call last)
<ipython-input-35-e4b7603c0a82> in <module>
----> 1 my_learner.lr_find()

~\Desktop\fastai\fastai\fastai\train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
     30     cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
     31     epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32     learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
     33 
     34 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,

~\Desktop\fastai\fastai\fastai\basic_train.py in fit(self, epochs, lr, wd, callbacks)
    197         callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
    198         if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
--> 199         fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
    200 
    201     def create_opt(self, lr:Floats, wd:Floats=0.)->None:

~\Desktop\fastai\fastai\fastai\basic_train.py in fit(epochs, learn, callbacks, metrics)
     97             cb_handler.set_dl(learn.data.train_dl)
     98             cb_handler.on_epoch_begin()
---> 99             for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
    100                 xb, yb = cb_handler.on_batch_begin(xb, yb)
    101                 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)

C:\Anaconda3\envs\fastai\lib\site-packages\fastprogress\fastprogress.py in __iter__(self)
     70         self.update(0)
     71         try:
---> 72             for i,o in enumerate(self._gen):
     73                 if i >= self.total: break
     74                 yield o

~\Desktop\fastai\fastai\fastai\basic_data.py in __iter__(self)
     73     def __iter__(self):
     74         "Process and returns items from `DataLoader`."
---> 75         for b in self.dl: yield self.proc_batch(b)
     76 
     77     @classmethod

C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
    574         while True:
    575             assert (not self.shutdown and self.batches_outstanding > 0)
--> 576             idx, batch = self._get_batch()
    577             self.batches_outstanding -= 1
    578             if idx != self.rcvd_idx:

C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _get_batch(self)
    541         elif self.pin_memory:
    542             while self.pin_memory_thread.is_alive():
--> 543                 success, data = self._try_get_batch()
    544                 if success:
    545                     return data

C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _try_get_batch(self, timeout)
    517             if not all(w.is_alive() for w in self.workers):
    518                 pids_str = ', '.join(str(w.pid) for w in self.workers if not w.is_alive())
--> 519                 raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))
    520             if isinstance(e, queue.Empty):
    521                 return (False, None)

RuntimeError: DataLoader worker (pid(s) 9584, 7236, 5108, 932, 13228, 13992, 4576, 13204) exited unexpectedly

I looked into the DataLoader but couldn't find anything useful.

1 Answer

2 votes
/ 02 May 2019

Although I don't understand the error message you posted, I can see one problem in your code.

out = out[:,-1,:] # batch_size x 480
out = self.batch_norm_1(self.activation_1(out))

But you declared batch_norm_1 as:

self.batch_norm_1 = nn.BatchNorm1d(122)

It should be:

self.batch_norm_1 = nn.BatchNorm1d(480)
...
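
To see why the number matters: nn.BatchNorm1d(num_features) applied to a 2-D input of shape (batch, features) normalizes over the feature dimension, so num_features must match the 480 features produced by out[:, -1, :]. A minimal check (batch size 24 taken from the question):

import torch
import torch.nn as nn

out = torch.randn(24, 480)              # shape after out[:, -1, :]
print(nn.BatchNorm1d(480)(out).shape)   # torch.Size([24, 480]) -- works
# nn.BatchNorm1d(122)(out)              # raises a RuntimeError (feature-size mismatch)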