I have been trying to retrain the model, but unfortunately I have been getting the same error for the last 2 days. Could you help me with this?
Initial setup:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import torchvision.models as models
from collections import OrderedDict
Datasets:
data_dir = 'flowers'
train_dir = data_dir + '/train'
train_transforms = transforms.Compose([transforms.Resize(224),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomRotation(45),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
import json

with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
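To rule out a data problem, here is a quick diagnostic sketch (my own check, assuming the loader defined above) showing what the loader actually yields:

# Diagnostic only: dtype, shape, and device of one batch from the loader
images, labels = next(iter(trainloader))
print(images.dtype, images.shape)   # should be torch.float32, torch.Size([32, 3, 224, 224])
print(labels.dtype, labels.shape)   # should be torch.int64, torch.Size([32])
print(images.device)                # cpu -- batches come off the loader on the CPU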
I tried to use a pretrained model and train only the classifier:
# Load a pretrained model
model = models.vgg16(pretrained=True)

# Freeze the pretrained parameters so only the new classifier is trained
for param in model.parameters():
    param.requires_grad = False

# New classifier; the final output size is 102, since we have 102 flower classes
classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(25088, 4096)),
    ('relu', nn.ReLU()),
    ('fc3', nn.Linear(4096, 102)),
    ('output', nn.LogSoftmax(dim=1))
]))

# Replace the model's old classifier with the new classifier
model.classifier = classifier

# Loss and optimizer (only the classifier parameters are optimized)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)

model.to('cuda')
epochs = 1
print_every = 40
steps = 0

for e in range(epochs):
    running_loss = 0
    model.train()
    # model = model.double()
    for images, labels in iter(trainloader):
        steps += 1
        images.resize_(32, 3, 224, 224)
        inputs = Variable(images.to('cuda'))
        targets = Variable(labels.to('cuda'))
        optimizer.zero_grad()

        # Forward and backward passes
        output = model.forward(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        # running_loss += loss.data[0]
        running_loss += loss.item()

        if steps % print_every == 0:
            print("Epoch: {}/{}... ".format(e+1, epochs),
                  "Loss: {:.4f}".format(running_loss/print_every))
Error message:
RuntimeError: Expected object of type torch.FloatTensor but found type torch.cuda.DoubleTensor for argument #2 'weight'
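For reference, a tiny standalone sketch (my own, not from the project code) that produces the same class of dtype-mismatch RuntimeError; the exact message wording depends on the PyTorch version:

import torch
from torch import nn

layer = nn.Linear(4, 2).double()   # weights become float64 (a DoubleTensor)
x = torch.randn(1, 4)              # input stays float32 (a FloatTensor)
layer(x)                           # raises a dtype-mismatch RuntimeError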