Я пытаюсь изменить размер изображения в Pytorch для последующей обработки во время обучения нейронной сети. Но получаю ошибку трансляции, когда я пытаюсь вызвать transforms.Resize () на изображении.
Вот мой фрагмент кода.
cuda:0
Classifier(
(fc1): Linear(in_features=784, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=128, bias=True)
(fc3): Linear(in_features=128, out_features=64, bias=True)
(fc4): Linear(in_features=64, out_features=10, bias=True)
)
Traceback (most recent call last):
File "netz.py", line 71, in <module>
train()
File "netz.py", line 46, in train
outputs=model(inputs)
File "/home/yyyy/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "netz.py", line 18, in forward
x=F.relu(self.fc1(x))
File "/home/yyy/anaconda3/lib/python3.7/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/home/yyy/anaconda3/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 55, in forward
return F.linear(input, self.weight, self.bias)
File "/home/yyy/anaconda3/lib/python3.7/site-packages/torch/nn/functional.py", line 1024, in linear
return torch.addmm(bias, input, weight.t())
RuntimeError: size mismatch, m1: [64 x 59536], m2: [784 x 256] at /opt/conda/conda-bld/pytorch_1532584813488/work/aten/src/THC/generic/THCTensorMathBlas.cu:249
---- Corresponding Code ---
import torch
from torch import nn,optim
import torch.nn.functional as F
from torchvision import datasets,transforms
NUM_EPOCH=700
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.fc1=nn.Linear(784,256)
self.fc2=nn.Linear(256,128)
self.fc3=nn.Linear(128,64)
self.fc4=nn.Linear(64,10)
def forward(self,x):
x=x.view(x.shape[0],-1)
x=F.relu(self.fc1(x))
x=F.relu(self.fc2(x))
x=F.relu(self.fc3(x))
x=F.log_softmax(self.fc4(x),dim=1)
return x
def train():
transform=transforms.Compose([
transforms.Resize(244),
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))])
trainset=datasets.FashionMNIST('./data',download=True,transform=transform)
trainloader=torch.utils.data.DataLoader(trainset,batch_size=64,shuffle=True,num_workers=2)
model=Classifier()
model=model.to(device)
criterion=nn.CrossEntropyLoss()
optimizer=optim.Adam(model.parameters(),lr=0.001)
for epoch in range(NUM_EPOCH):
running_loss=0.0
for i, data in enumerate(trainloader,0):
inputs,labels=data
inputs=inputs.to(device)
labels=labels.to(device)
optimizer.zero_grad()
outputs=model(inputs)
outputs.to(device)
loss=criterion(outputs,labels)
loss.backward()
optimizer.step()
running_loss+=loss.item()
if(i%20 == 19):
print("epoch: ",epoch+1)
print("i + 1",i)
print("loss: ",running_loss/20.0)
#print('[%d, 5d] loss: %.3f' %(epoch+1,i+1,running_loss/20))
running_loss=0.0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net=Classifier()
net.to(device)
print(net)
train()
Итак, мой вопрос, каков наиболее подходящий способ изменить размер изображения, пока я
обучение сети для моего конкретного случая использования?
Я использую Cuda8.0 и CudaDNN7.1 с Pytorch версии 0.4.1 и Python3.7 в системе Ubuntu 16.04 LTS.