I am fine-tuning on my own data with a model I found on GitHub. I am new to programming in Python.
I don't understand how to fix this. Could someone please tell me what this error means and how to fix it?
Traceback (most recent call last):
  File "finetune.py", line 254, in <module>
    main()
  File "finetune.py", line 127, in main
    train(TrainImgLoader, model, optimizer, log, epoch)
  File "finetune.py", line 170, in train
    loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
  File "finetune.py", line 170, in <listcomp>
    loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
IndexError: too many indices for tensor of dimension 3
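For context, here is a minimal standalone example I put together myself (it is not from the repository, and the tensor names are just placeholders) that produces the same IndexError, in case it helps show what I think is happening with the mask shapes:

import torch

t = torch.zeros(2, 4, 4)             # a 3-D tensor, like outputs[x] after squeeze
mask = torch.zeros(2, 1, 4, 4) > 0   # a 4-D boolean mask, like disp_L > 0 with a channel dim
print(t[mask])                       # raises IndexError: too many indices for tensor of dimension 3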
Here is the code snippet:
for epoch in range(args.start_epoch, args.epochs):
    log.info('This is {}-th epoch'.format(epoch))
    adjust_learning_rate(optimizer, epoch)

    train(TrainImgLoader, model, optimizer, log, epoch)

    savefilename = args.save_path + '/checkpoint.tar'
    torch.save({
        'epoch': epoch,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }, savefilename)

    if epoch % 1 == 0:
        test(TestImgLoader, model, log)

test(TestImgLoader, model, log)
log.info('full training time = {:.2f} Hours'.format((time.time() - start_full_time) / 3600))
def train(dataloader, model, optimizer, log, epoch=0):
    stages = 3 + args.with_spn
    losses = [AverageMeter() for _ in range(stages)]
    length_loader = len(dataloader)

    model.train()

    for batch_idx, (imgL, imgR, disp_L) in enumerate(dataloader):
        imgL = imgL.float()  # .cuda()
        imgR = imgR.float()  # .cuda()
        disp_L = disp_L.float()  # .cuda()

        optimizer.zero_grad()
        mask = disp_L > 0
        mask.detach_()

        outputs = model(imgL, imgR)
        if args.with_spn:
            if epoch >= args.start_epoch_for_spn:
                num_out = len(outputs)
            else:
                num_out = len(outputs) - 1
        else:
            num_out = len(outputs)

        outputs = [torch.squeeze(output, 1) for output in outputs]
        loss = [args.loss_weights[x] * F.smooth_l1_loss(outputs[x][mask], disp_L[mask], size_average=True)
                for x in range(num_out)]

        sum(loss).backward()
        optimizer.step()
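My guess (and I may well be wrong) is that disp_L still carries a channel dimension of size 1, so mask ends up 4-D while each squeezed output is 3-D. Would something like this sketch be the right kind of fix, squeezing disp_L before building the mask?

# my guess at a fix, assuming disp_L has shape (batch, 1, height, width):
# squeeze the extra channel dim so mask and outputs[x] are all 3-D
disp_L = torch.squeeze(disp_L, 1)
mask = disp_L > 0
mask.detach_()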
Thanks a lot!