Model Training and Validation

The code is worked through step by step: the recognition parameters are adjusted and the best-performing recognition settings are computed.

import os, sys, glob, shutil, json

os.environ["CUDA_VISIBLE_DEVICES"] = '0'

import cv2
from PIL import Image
import numpy as np
from tqdm import tqdm, tqdm_notebook

import torch

torch.manual_seed(0)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True

import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data.dataset import Dataset

class SVHNDataset(Dataset):  # custom dataset class inheriting from torch Dataset
    def __init__(self, img_path, img_label, transform=None):
        self.img_path = img_path
        self.img_label = img_label
        if transform is not None:
            self.transform = transform
        else:
            self.transform = None

    def __getitem__(self, index):
        img = Image.open(self.img_path[index]).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        # pad each label to a fixed length of 5; class 10 means "no digit"
        lbl = np.array(self.img_label[index], dtype=int)
        lbl = list(lbl) + (5 - len(lbl)) * [10]
        return img, torch.from_numpy(np.array(lbl[:5]))

    def __len__(self):
        return len(self.img_path)
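
Because a house number can contain anywhere from one to five digits, __getitem__ pads every label to a fixed length of 5 and uses 10 as the "no digit" class. A minimal sketch of that padding (the label value is made up):

example_label = [1, 9]                                          # hypothetical raw label from the JSON file
padded = list(example_label) + (5 - len(example_label)) * [10]
print(padded[:5])                                               # [1, 9, 10, 10, 10]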

train_path = glob.glob('C:\\Users\\Zhyang\\Desktop\\match\\mchar_train\\*.png')  # glob.glob() collects the file paths matching the pattern
train_path.sort()  # sort the list in ascending order
train_json = json.load(open(r'C:\Users\Zhyang\Desktop\match\mchar_train.json'))
train_label = [train_json[x]['label'] for x in train_json]  # ground-truth digits for each image, read from the JSON file
# print(len(train_path), len(train_label))

train_loader = torch.utils.data.DataLoader(
    SVHNDataset(train_path, train_label,
                transforms.Compose([
                    transforms.Resize((64, 128)),           # resize to h x w = 64 x 128
                    transforms.RandomCrop((60, 120)),       # random crop
                    transforms.ColorJitter(0.3, 0.3, 0.2),  # randomly jitter brightness, contrast and saturation
                    transforms.RandomRotation(45),          # random rotation
                    transforms.ToTensor(),                  # convert to a tensor scaled to [0, 1]
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # normalize with ImageNet statistics
                ])),
    batch_size=40,   # batch_size (int, optional) - number of samples per batch (default: 1)
    shuffle=True,    # shuffle (bool, optional) - reshuffle the data at every epoch (default: False)
    num_workers=5,   # num_workers (int, optional) - number of worker subprocesses; 0 loads data in the main process (default: 0)
)
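
As an optional sanity check, one batch can be pulled from the loader to confirm the shapes produced by the transforms and the label padding; the expected sizes below assume the settings above, and the lines are kept commented out so the script does not spawn worker processes at import time:

# images, labels = next(iter(train_loader))
# print(images.shape, labels.shape)  # expected: torch.Size([40, 3, 60, 120]) and torch.Size([40, 5])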

val_path = glob.glob('C:\\Users\\Zhyang\\Desktop\\match\\mchar_val\\*.png')
val_path.sort()
val_json = json.load(open(r'C:\Users\Zhyang\Desktop\match\mchar_val.json'))
val_label = [val_json[x]['label'] for x in val_json]
# print(len(val_path), len(val_label))

val_loader = torch.utils.data.DataLoader(
    SVHNDataset(val_path, val_label,
                transforms.Compose([
                    transforms.Resize((64, 128)),           # resize to h x w = 64 x 128
                    transforms.RandomCrop((60, 120)),       # random crop
                    transforms.ColorJitter(0.3, 0.3, 0.2),  # randomly jitter brightness, contrast and saturation
                    transforms.RandomRotation(45),          # random rotation
                    transforms.ToTensor(),                  # convert to a tensor scaled to [0, 1]
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # normalize with ImageNet statistics
                ])),
    batch_size=40,
    shuffle=False,
    num_workers=5,
)

class SVHN_Model1(nn.Module):
    def __init__(self):
        super(SVHN_Model1, self).__init__()
        # ImageNet-pretrained ResNet-18 backbone with its final fc layer removed
        model_conv = models.resnet18(pretrained=True)
        model_conv.avgpool = nn.AdaptiveAvgPool2d(1)
        model_conv = nn.Sequential(*list(model_conv.children())[:-1])
        self.cnn = model_conv
        # five parallel classifiers, one per character position (10 digits + 1 "no digit" class)
        self.fc1 = nn.Linear(512, 11)
        self.fc2 = nn.Linear(512, 11)
        self.fc3 = nn.Linear(512, 11)
        self.fc4 = nn.Linear(512, 11)
        self.fc5 = nn.Linear(512, 11)

    def forward(self, img):
        feat = self.cnn(img)
        # print(feat.shape)
        feat = feat.view(feat.shape[0], -1)
        c1 = self.fc1(feat)
        c2 = self.fc2(feat)
        c3 = self.fc3(feat)
        c4 = self.fc4(feat)
        c5 = self.fc5(feat)
        return c1, c2, c3, c4, c5
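
To confirm that the five heads produce the expected shapes, here is a small sketch with a dummy batch (constructing the model downloads the ImageNet weights on first use; kept commented out so it only runs when needed):

# m = SVHN_Model1()
# outs = m(torch.randn(2, 3, 60, 120))  # dummy batch of 2 RGB crops, 60 x 120 like the training crops
# print([o.shape for o in outs])        # five tensors of shape torch.Size([2, 11])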

def train(train_loader, model, criterion, optimizer, epoch):  # loader, model, loss function, optimizer, epoch index
    # switch the model to training mode
    model.train()
    train_loss = []
    for i, (input, target) in enumerate(train_loader):  # enumerate() pairs every batch with its index
        # print('train', i)
        if use_cuda:
            input = input.cuda()
            target = target.cuda()
        target = target.long()
        c0, c1, c2, c3, c4 = model(input)
        # sum the cross-entropy losses of the five character positions
        loss = criterion(c0, target[:, 0]) + \
               criterion(c1, target[:, 1]) + \
               criterion(c2, target[:, 2]) + \
               criterion(c3, target[:, 3]) + \
               criterion(c4, target[:, 4])
        # loss /= 6
        optimizer.zero_grad()  # zero the gradients, i.e. reset d(loss)/d(weight) to 0
        loss.backward()        # back-propagate to compute the gradients
        optimizer.step()       # update the model parameters
        train_loss.append(loss.item())  # record the scalar loss of this batch
    return np.mean(train_loss)

def validate(val_loader, model, criterion):
    # switch the model to evaluation mode
    model.eval()
    val_loss = []
    # do not track gradients
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            # print('validate', i)
            if use_cuda:
                input = input.cuda()
                target = target.cuda()
            target = target.long()
            c0, c1, c2, c3, c4 = model(input)
            loss = criterion(c0, target[:, 0]) + \
                   criterion(c1, target[:, 1]) + \
                   criterion(c2, target[:, 2]) + \
                   criterion(c3, target[:, 3]) + \
                   criterion(c4, target[:, 4])
            # loss /= 6
            val_loss.append(loss.item())
    return np.mean(val_loss)

def predict(test_loader, model, tta=10):
    model.eval()
    test_pred_tta = None
    # number of test-time augmentation (TTA) rounds; the logits of all rounds are summed
    for _ in range(tta):
        test_pred = []
        with torch.no_grad():
            for i, (input, target) in enumerate(test_loader):
                if use_cuda:
                    input = input.cuda()
                c0, c1, c2, c3, c4 = model(input)
                if use_cuda:
                    output = np.concatenate([
                        c0.data.cpu().numpy(),
                        c1.data.cpu().numpy(),
                        c2.data.cpu().numpy(),
                        c3.data.cpu().numpy(),
                        c4.data.cpu().numpy()], axis=1)
                else:
                    output = np.concatenate([
                        c0.data.numpy(),
                        c1.data.numpy(),
                        c2.data.numpy(),
                        c3.data.numpy(),
                        c4.data.numpy()], axis=1)
                test_pred.append(output)
        test_pred = np.vstack(test_pred)
        if test_pred_tta is None:
            test_pred_tta = test_pred
        else:
            test_pred_tta += test_pred
    return test_pred_tta
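
predict() returns an array with one row per image and 5 x 11 = 55 columns: five consecutive blocks of 11 summed logits, one block per character position. The decoding used later in mode_take() and produce() can be written compactly as the following sketch (decode_rows is a hypothetical helper, not part of the original script):

def decode_rows(pred):
    # pred: [N, 55]; block k holds the 11 logits of character position k
    digits = np.vstack([pred[:, k * 11:(k + 1) * 11].argmax(1) for k in range(5)]).T  # [N, 5] class indices
    return [''.join(map(str, row[row != 10])) for row in digits]                      # drop the "no digit" class 10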

# train the model and save the best checkpoint
def mode_take():
    best_loss = 10
    for epoch in range(10):
        train_loss = train(train_loader, model, criterion, optimizer, epoch)
        val_loss = validate(val_loader, model, criterion)

        val_label = [''.join(map(str, x)) for x in val_loader.dataset.img_label]
        print('val_label', val_label[20])
        val_predict_label = predict(val_loader, model, 1)
        # take the argmax inside each 11-class block to decode the five character positions
        val_predict_label = np.vstack([
            val_predict_label[:, :11].argmax(1),
            val_predict_label[:, 11:22].argmax(1),
            val_predict_label[:, 22:33].argmax(1),
            val_predict_label[:, 33:44].argmax(1),
            val_predict_label[:, 44:55].argmax(1),
        ]).T
        val_label_pred = []
        for x in val_predict_label:
            val_label_pred.append(''.join(map(str, x[x != 10])))
        print('val_label_pred', val_label_pred[:20])

        val_char_acc = np.mean(np.array(val_label_pred) == np.array(val_label))
        print('Epoch: {0}, Train loss: {1} \t Val loss: {2}'.format(epoch, train_loss, val_loss))
        print('Val Acc', val_char_acc)
        # keep the checkpoint with the lowest validation loss
        if val_loss < best_loss:
            best_loss = val_loss
            print('Find better model in Epoch {0}, saving model.'.format(epoch))
            torch.save(model.state_dict(), 'C:\\Users\\Zhyang\\Desktop\\match\\model.pt')

# predict on the test set and generate the submission file
def produce():
    test_path = glob.glob('C:\\Users\\Zhyang\\Desktop\\match\\mchar_test_a\\*.png')
    test_path.sort()
    test_label = [[1]] * len(test_path)  # placeholder labels; the test set has no ground truth
    print(len(test_path), len(test_label))

    test_loader = torch.utils.data.DataLoader(
        SVHNDataset(test_path, test_label,
                    transforms.Compose([
                        transforms.Resize((70, 140)),
                        transforms.RandomCrop((60, 120)),
                        transforms.ColorJitter(0.3, 0.3, 0.2),
                        transforms.RandomRotation(5),
                        transforms.ToTensor(),
                        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                    ])),
        batch_size=40,
        shuffle=False,
        num_workers=5,
    )

    # load the best saved model
    model.load_state_dict(torch.load('C:\\Users\\Zhyang\\Desktop\\match\\model.pt'))

    test_predict_label = predict(test_loader, model, 1)
    print(test_predict_label[:, :11].argmax(1))
    print(test_predict_label.shape)

    test_label = [''.join(map(str, x)) for x in test_loader.dataset.img_label]
    # np.vstack(tup) accepts a tuple, list or numpy array and returns a numpy array
    test_predict_label = np.vstack([
        test_predict_label[:, :11].argmax(1),
        test_predict_label[:, 11:22].argmax(1),
        test_predict_label[:, 22:33].argmax(1),
        test_predict_label[:, 33:44].argmax(1),
        test_predict_label[:, 44:55].argmax(1),
    ]).T
    print('test_predict_label', test_predict_label)

    test_label_pred = []
    for x in test_predict_label:
        test_label_pred.append(''.join(map(str, x[x != 10])))
    print('test_label_pred ', test_label_pred)
    print(type(test_label_pred))

    import pandas as pd
    df_submit = pd.read_csv('C:\\Users\\Zhyang\\Desktop\\match\\mchar_sample_submit_A.csv')
    df_submit['file_code'] = test_label_pred
    df_submit.to_csv('C:\\Users\\Zhyang\\Desktop\\match\\mchar_sample_submit_A.csv', index=None)

# training and validation
if __name__ == '__main__':
    model = SVHN_Model1()
    criterion = nn.CrossEntropyLoss()  # loss function
    optimizer = torch.optim.Adam(model.parameters(), 0.001)  # Adam optimizer over the network parameters
    # params (iterable) - parameters to optimize or dicts defining parameter groups; lr (float, optional) - learning rate (default: 1e-3)
    best_loss = 1000.0
    # whether to use the GPU
    use_cuda = True
    if use_cuda:
        model = model.cuda()
    mode_take()
    # produce()
