Это мой код: он корректно работает с MNIST, но не работает с моими собственными данными, оформленными аналогично MNIST.
import torch as pt
import torchvision as ptv
import numpy as np
import scipy.io as sio
# Load the MNIST train/test splits (downloading on first use) and wrap
# each split in a simple batched DataLoader.
train_set = ptv.datasets.MNIST(
    "../../pytorch_database/mnist/train",
    train=True,
    transform=ptv.transforms.ToTensor(),
    download=True,
)
test_set = ptv.datasets.MNIST(
    "../../pytorch_database/mnist/test",
    train=False,
    transform=ptv.transforms.ToTensor(),
    download=True,
)
train_dataset = pt.utils.data.DataLoader(train_set, batch_size=100)
test_dataset = pt.utils.data.DataLoader(test_set, batch_size=100)
class SBPEstimateDataset(pt.utils.data.Dataset):
    """Map-style dataset of 6-element feature vectors with integer SBP labels.

    Loads a MATLAB .mat file containing:
      * ``fea`` — feature matrix, assumed shape (N, 6) — TODO confirm
      * ``sbp`` — label column, assumed shape (N, 1) — TODO confirm

    Fixes vs. the original: subclasses ``torch.utils.data.Dataset`` (so it is
    a proper Dataset for ``DataLoader``) and takes the .mat path as a
    parameter instead of hard-coding a Windows path (old path kept as the
    default for backward compatibility).
    """

    def __init__(self, mat_path="D:/demo_SBPFea1.mat"):
        data = sio.loadmat(mat_path)
        self.fea = data['fea']
        self.sbp = data['sbp']

    def __len__(self):
        # Number of samples = number of label rows.
        return len(self.sbp)

    def __getitem__(self, idx):
        # Shape (1, 6, 1) mimics MNIST's (channel, H, W) sample layout.
        fea = pt.FloatTensor(self.fea[idx]).reshape(1, 6, 1)
        # CrossEntropyLoss wants an integer class index per sample.
        sbp = int(self.sbp[idx])
        return fea, sbp
# Build the custom dataset, print one sample from each source for a quick
# visual comparison with MNIST, then wrap it in a shuffled DataLoader.
dataset = SBPEstimateDataset()
print(train_set[1])
print(dataset[1])

t_dataset = pt.utils.data.DataLoader(dataset, batch_size=50, shuffle=True)
class MLP(pt.nn.Module):
    """Three-layer MLP mapping a 6-feature input to 9 class logits.

    Bug fix: the original applied ``softmax`` inside ``forward`` while the
    training loop uses ``CrossEntropyLoss``, which applies log-softmax
    itself. The resulting double softmax crushes the gradients, so the
    network never learns — this is why training "works" on the custom data
    but the result is wrong. ``forward`` now returns raw logits; apply
    ``softmax`` outside the model only when probabilities are needed for
    display.
    """

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = pt.nn.Linear(6, 7)
        self.fc2 = pt.nn.Linear(7, 8)
        self.fc3 = pt.nn.Linear(8, 9)

    def forward(self, din):
        # Flatten (N, 1, 6, 1) batches to (N, 6).
        din = din.view(-1, 1 * 6)
        dout = pt.nn.functional.relu(self.fc1(din))
        dout = pt.nn.functional.relu(self.fc2(dout))
        # Raw logits — CrossEntropyLoss handles the (log-)softmax.
        return self.fc3(dout)
#on MNIST
# class MLP(pt.nn.Module):
# def __init__(self):
# super(MLP, self).__init__()
# self.fc1 = pt.nn.Linear(784, 512)
# self.fc2 = pt.nn.Linear(512, 128)
# self.fc3 = pt.nn.Linear(128, 10)
#
# def forward(self, din):
# din = din.view(-1, 28 * 28)
# dout = pt.nn.functional.relu(self.fc1(din))
# dout = pt.nn.functional.relu(self.fc2(dout))
# return pt.nn.functional.softmax(self.fc3(dout))
# Device-agnostic setup: run on GPU when available, otherwise CPU
# (the original hard-coded .cuda() and crashed without CUDA).
device = pt.device("cuda" if pt.cuda.is_available() else "cpu")
model = MLP().to(device)
print(model)

# Loss and optimizer. CrossEntropyLoss expects raw logits plus integer
# class labels.
optimizer = pt.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
lossfunc = pt.nn.CrossEntropyLoss().to(device)

# NOTE(review): the sample dump below shows raw feature values around
# 100-150; normalizing them (e.g. z-score per feature) is likely required
# for this MLP to train — confirm against the data.
for epoch in range(10):
    # For the MNIST run, iterate train_dataset here instead of t_dataset.
    for i, data in enumerate(t_dataset):
        inputs, labels = data
        # torch.autograd.Variable is deprecated since PyTorch 0.4;
        # tensors participate in autograd directly.
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = lossfunc(outputs, labels)
        loss.backward()
        optimizer.step()
        if epoch == 5:
            print("in:")
            print(labels[0])
            print("out:")
            # Restores the output print the pasted results show
            # (the original had a dangling print("out:") with no value).
            print(outputs[0])
Мой формат данных в виде тензора:
(tensor([[[146.8442],
[146.8442],
[145.2633],
[142.1075],
[135.7899],
[126.3164]]]), 0)
Формат MNIST в виде тензора:
(tensor([[[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000],
...
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.2627, 0.8196, 0.9882, 0.9882, 0.2196, 0.0235,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.1137, 0.7098, 0.9882, 0.8510, 0.3294, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.9922, 0.9882, 0.9882, 0.3294, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0510,
0.7490, 1.0000, 0.8431, 0.1216, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.6039,
0.9882, 0.9922, 0.4745, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000],
...
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.2235, 0.9882,
0.9882, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000]
]
]), 7)
Я использую MATLAB для создания файла с массивом 'fea' размером 1541×6 и массивом 'sbp' размером 1541×1. Они сохранены в формате 'single' в файле 'D:/demo_SBPFea1.mat'. Обучение выполняется, но результат неправильный.
MY:
in:
tensor(7, device='cuda:0')
out:
tensor([0.0068, 0.4190, 0.0369, 0.1688, 0.0535, 0.0957, 0.0971, 0.0516, 0.0707],
device='cuda:0', grad_fn=<SelectBackward>)
Если я изменяю модель нейронной сети, она корректно работает на MNIST.
Вывод на MNIST:
in:
tensor(3, device='cuda:0')
out:
tensor([3.0662e-08, 7.8311e-09, 1.6701e-06, 9.9996e-01, 1.3490e-10, 7.4925e-09,
1.7589e-10, 7.7624e-12, 3.9829e-05, 2.4372e-12], device='cuda:0',
grad_fn=<SelectBackward>)