PyTorch is an important framework for machine learning, and nn is its core module. Since my fundamentals are weak, I'll start with a simple model to get familiar with the torch architecture.
######## Image Recognition
## Step 1
# a) Import libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torchvision
from torch.utils.data import DataLoader
import cv2
# torch.autograd.Variable is deprecated; plain tensors are used in the training loop below
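# Optional sanity check (my own addition, not part of the original recipe): confirm
# the PyTorch version and whether CUDA is visible, since the device choice below depends on it.
print(torch.__version__, '| CUDA available:', torch.cuda.is_available())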
# b) Get the training and test sets
# Download the training set (a raw string is used for the Windows path so the
# backslashes are not treated as escape sequences)
train_dataset = datasets.MNIST(root=r'D:\data2\pytorch',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)
# Download the test set
test_dataset = datasets.MNIST(root=r'D:\data2\pytorch',
                              train=False,
                              transform=transforms.ToTensor(),
                              download=True)
# root specifies where the dataset is stored after downloading
# transform specifies which transformation to apply to the data when loading
# train selects which split to load after download: True loads the training split, False the test split
# download=True means the program should download the dataset automatically
# With these settings, running the code downloads MNIST to the given path, after which it is ready to use.
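# A quick look at what was downloaded (a small sketch of my own, assuming the
# download above succeeded): each sample is a (1, 28, 28) tensor plus an integer label.
print(len(train_dataset), len(test_dataset))   # 60000 10000
sample, label = train_dataset[0]
print(sample.shape, label)                     # torch.Size([1, 28, 28]) and the digit label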
# Data loading and preview
# The dataset argument specifies which dataset to load
# The batch_size argument sets how many images go into each batch
# During loading the data are shuffled and packed into batches
# Load the training set
batch_size = 60
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
# Load the test set
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True)
images, labels = next(iter(train_loader))
img = torchvision.utils.make_grid(images)
img = img.numpy().transpose(1, 2, 0)  # CHW -> HWC for display
std = [0.5, 0.5, 0.5]
mean = [0.5, 0.5, 0.5]
# This step would undo a Normalize((0.5,), (0.5,)) transform; since only ToTensor
# is used above, it just brightens the preview a little.
img = img * std + mean
print(labels)
cv2.imshow('win', img)
key_pressed = cv2.waitKey(0)
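# If an OpenCV window is inconvenient (e.g. over SSH or in a notebook), the same
# grid can be shown with matplotlib instead -- a sketch of mine, assuming matplotlib is installed.
import matplotlib.pyplot as plt

plt.imshow(img)        # img is the H x W x C numpy grid built above
plt.title('one training batch')
plt.axis('off')
plt.show()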
# Build the neural network
# Convolution layers use torch.nn.Conv2d
# Activation layers use torch.nn.ReLU
# Pooling layers use torch.nn.MaxPool2d
# Fully connected layers use torch.nn.Linear
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(1, 6, 3, 1, 2), nn.ReLU(),
                                   nn.MaxPool2d(2, 2))
        self.conv2 = nn.Sequential(nn.Conv2d(6, 16, 5), nn.ReLU(),
                                   nn.MaxPool2d(2, 2))
        self.fc1 = nn.Sequential(nn.Linear(16 * 5 * 5, 120),
                                 nn.BatchNorm1d(120), nn.ReLU())
        self.fc2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.BatchNorm1d(84),
            nn.ReLU(),
            nn.Linear(84, 10))
        # The final output size must be 10, because the digit classes are 0-9

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size()[0], -1)  # flatten to (batch, 16*5*5)
        x = self.fc1(x)
        x = self.fc2(x)  # fc2 already ends in Linear(84, 10); there is no fc3
        return x
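# Sanity-checking the layer sizes (my own sketch, not part of the original):
# 28x28 -> conv1 (kernel 3, padding 2) -> 30x30 -> pool -> 15x15
#       -> conv2 (kernel 5)            -> 11x11 -> pool -> 5x5,
# which is why fc1 expects 16 * 5 * 5 = 400 inputs.
_dummy = torch.zeros(2, 1, 28, 28)   # batch of 2 so BatchNorm1d has statistics to compute
print(LeNet()(_dummy).shape)         # torch.Size([2, 10])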
# Train the model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 64  # note: unused here; the loaders above were already built with batch_size = 60
LR = 0.001
net = LeNet().to(device)
# The loss function is cross-entropy
criterion = nn.CrossEntropyLoss()
# The optimizer is the Adam adaptive optimization algorithm
optimizer = optim.Adam(
    net.parameters(),
    lr=LR,
)
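# nn.CrossEntropyLoss applies log-softmax internally, which is why the network
# outputs raw logits and has no softmax layer. A tiny illustration of my own:
# the loss is small when the true class holds the largest logit.
_logits = torch.tensor([[2.0, 0.5, -1.0]])
_target = torch.tensor([0])
print(criterion(_logits, _target).item())   # ~0.24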
epochs = 1
if __name__ == '__main__':
    for epoch in range(epochs):
        sum_loss = 0.0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            # Variable is deprecated; just move plain tensors to the chosen device
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()              # zero the gradients
            outputs = net(inputs)              # forward pass through the network
            loss = criterion(outputs, labels)  # compute the loss
            loss.backward()                    # backpropagate
            optimizer.step()                   # one parameter update from the gradients
            # print(loss)
            sum_loss += loss.item()
            if i % 100 == 99:
                print('[%d,%d] loss:%.03f' %
                      (epoch + 1, i + 1, sum_loss / 100))
                sum_loss = 0.0

    net.eval()  # switch the model to evaluation mode (matters for BatchNorm)
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients are needed during evaluation
        for data_test in test_loader:
            images, labels = data_test
            images, labels = images.to(device), labels.to(device)
            output_test = net(images)
            _, predicted = torch.max(output_test, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum()
    print("correct1: ", correct)
    print("Test acc: {0}".format(correct.item() /
                                 len(test_dataset)))
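    # A minimal single-image inference sketch (my own addition, not in the original):
    # predict the digit for the first test image with the freshly trained net.
    sample_img, sample_label = test_dataset[0]   # (1, 28, 28) tensor and its integer label
    with torch.no_grad():
        pred = net(sample_img.unsqueeze(0).to(device)).argmax(dim=1).item()
    print("predicted:", pred, "actual:", sample_label)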