Preface:
Getting started with PyTorch is a process that is both painful and rewarding, and all the more so for someone like me who does not come from an AI background and whose Python skills are weak. From the very first step of setting up the environment I ran into a string of problems; installing PyTorch and getting Anaconda to work with PyCharm alone stalled me for several months, and at one point I nearly gave up. But once the environment is configured, progress comes quickly: successfully building a simple CNN image binary-classification model in PyTorch was what gave me the motivation to keep learning. If you spot any mistakes or omissions in this article, please point them out.
Special thanks:
I got started by watching 小土堆's tutorial videos; the code in this post is adapted from 龙良曲's course videos.
1. Overall workflow:
2. Implementing the training code
A complete PyTorch CNN project needs at least the following files:
① dataset (a folder containing the training images)
② train.py (the training script, which produces the model)
③ test.py (the test/inference script, used to evaluate or apply the trained model)
2.1 Data processing
2.1.1 Built-in datasets
These are already covered in the official PyTorch documentation, so I won't repeat them here.
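For reference, loading a built-in torchvision dataset takes only a few lines. A minimal sketch using CIFAR10 (purely illustrative and unrelated to the custom dataset below):

import torchvision
from torch.utils.data import DataLoader

# download CIFAR10 into ./cifar10 and convert images to tensors
train_set = torchvision.datasets.CIFAR10(root='./cifar10', train=True,
                                          transform=torchvision.transforms.ToTensor(),
                                          download=True)
train_loader = DataLoader(train_set, batch_size=16, shuffle=True)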
2.1.2 Custom datasets
① Preparing the dataset and importing the libraries
We name each class folder after its label and put that class's images inside it; a possible layout is sketched below.
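For the ants/bees dataset used in this post, the directory would look roughly like this (the folder names are the labels; the file names themselves don't matter):

dataset/train/
├── ants/
│   ├── 0013035.jpg
│   └── ...
└── bees/
    └── ...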
from torch import nn
import time
import csv
import glob
import os
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from PIL import Image

# define the training device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
② Reorganizing the dataset
A custom PyTorch dataset must implement two methods, __len__ and __getitem__: the former returns the length of the dataset, and the latter returns an image together with its label.
Note that the CNN expects inputs of a fixed size, so every image must be resized. Here we resize to 32×32, which is what makes the flattened feature size match the 64*4*4 input of the first Linear layer (three 2×2 max-pooling layers halve the spatial size: 32 → 16 → 8 → 4).
class data_remake(Dataset):
    def __init__(self, root, resize, mode):
        super(data_remake, self).__init__()
        # root directory of the dataset
        self.root = root
        # target size for resizing images
        self.resize = resize
        # encode the folder names (labels) as integers
        self.name_to_label = {}
        for name in sorted(os.listdir(os.path.join(root))):
            if not os.path.isdir(os.path.join(root, name)):
                continue
            self.name_to_label[name] = len(self.name_to_label.keys())
        print(self.name_to_label)
        # write the (image path, label) pairs into a csv file
        self.images, self.labels = self.load_csv('images.csv')
        # split the dataset: train -> 0%-60%, val -> 60%-80%, the rest -> 80%-100%
        if mode == 'train':  # 0%-60%
            self.images = self.images[:int(0.6 * len(self.images))]
            self.labels = self.labels[:int(0.6 * len(self.labels))]
        elif mode == 'val':  # 60%-80%
            self.images = self.images[int(0.6 * len(self.images)):int(0.8 * len(self.images))]
            self.labels = self.labels[int(0.6 * len(self.labels)):int(0.8 * len(self.labels))]
        else:  # 80%-100%
            self.images = self.images[int(0.8 * len(self.images)):]
            self.labels = self.labels[int(0.8 * len(self.labels)):]

    def load_csv(self, filename):
        if not os.path.exists(os.path.join(self.root, filename)):
            images = []
            for name in self.name_to_label.keys():
                # e.g. 'dataset/train\ants\0013035.jpg'
                images += glob.glob(os.path.join(self.root, name, '*.jpg'))
                images += glob.glob(os.path.join(self.root, name, '*.png'))
            print(len(images), images)  # 243 'dataset/train\ants\0013035.jpg'
            random.shuffle(images)
            with open(os.path.join(self.root, filename), mode='w', newline='') as f:
                writer = csv.writer(f)
                for img in images:  # 'dataset/train\ants\0013035.jpg'
                    name = img.split(os.sep)[-2]
                    label = self.name_to_label[name]
                    # 'dataset/train\ants\0013035.jpg', 0
                    writer.writerow([img, label])
                print('written into csv file:', filename)
        # read back from the csv file
        images, labels = [], []
        with open(os.path.join(self.root, filename)) as f:
            reader = csv.reader(f)
            for row in reader:
                img, label = row
                label = int(label)
                images.append(img)
                labels.append(label)
        assert len(images) == len(labels)
        return images, labels

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # img = 'dataset/train\ants\0013035.jpg', label = 0
        img, label = self.images[idx], self.labels[idx]
        tf = transforms.Compose([
            lambda x: Image.open(x).convert('RGB'),  # string path -> image data
            transforms.Resize((self.resize, self.resize)),
            transforms.ToTensor(),
        ])
        img = tf(img)
        label = torch.tensor(label)
        return img, label
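As a quick sanity check (a hypothetical snippet, assuming the folder structure above), you can instantiate the class and inspect a single sample:

db = data_remake('dataset/train', 32, mode='train')
img, label = db[0]
print(img.shape, label)  # expected: torch.Size([3, 32, 32]) and a scalar label tensor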
③ Dataset and DataLoader
# build the training and validation datasets and record their sizes
train_data = data_remake('dataset/train', 32, mode='train')
test_data = data_remake('dataset/train', 32, mode='val')
train_data_size = len(train_data)
test_data_size = len(test_data)
print('Length of the training dataset: {}'.format(train_data_size))
print('Length of the test dataset: {}'.format(test_data_size))

# wrap the datasets in DataLoaders
train_dataloader = DataLoader(train_data, batch_size=16)
test_dataloader = DataLoader(test_data, batch_size=16)
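Note that the image list is shuffled only once, when images.csv is first written, so the DataLoader above yields batches in the same order every epoch. If you want a fresh shuffle each epoch, DataLoader supports it directly (an optional tweak, not part of the original code):

train_dataloader = DataLoader(train_data, batch_size=16, shuffle=True)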
2.2 Building the model
# define the network
class Han(nn.Module):
    def __init__(self):
        super(Han, self).__init__()
        self.model = nn.Sequential(
            # padding is derived from the model structure and the convolution output-size formula
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(in_features=64*4*4, out_features=64),
            nn.Linear(in_features=64, out_features=2)
        )

    def forward(self, input):
        input = self.model(input)
        return input
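Before training, it is worth verifying that a 32×32 input really ends up as a 64*4*4 feature map (each of the three MaxPool2d layers halves the spatial size: 32 → 16 → 8 → 4). A minimal sketch, assuming the Han class and the imports above:

if __name__ == '__main__':
    han = Han()
    dummy = torch.ones((16, 3, 32, 32))  # a fake batch of 16 RGB images
    output = han(dummy)
    print(output.shape)                  # expected: torch.Size([16, 2])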
2.3 Training code
We need to prepare:
① a loss function
② an optimizer
③ TensorBoard for visualizing the training process
# instantiate the model and move it to the training device
han = Han()
han = han.to(device)

# loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# optimizer
# learning_rate = 1e-2
learning_rate = 0.01
optimizer = torch.optim.SGD(han.parameters(), lr=learning_rate)

# training bookkeeping
total_train_step = 0   # number of training steps so far
total_test_step = 0    # number of test rounds so far
epoch = int(input('Enter the number of epochs: '))

# TensorBoard writer for visualization
writer = SummaryWriter('train_logs_1')

# record the start time
start_time = time.time()

for i in range(epoch):
    print('------- Epoch {} starts -------'.format(i + 1))

    # training steps for one epoch
    for data in train_dataloader:
        # load a batch of images and labels
        imgs, targets = data
        # move them to the GPU (if available)
        imgs = imgs.to(device)
        targets = targets.to(device)
        # forward pass
        output = han(imgs)
        # compute the loss
        loss = loss_fn(output, targets)
        # optimizer step: reset gradients, backpropagate, update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1
        # log every 100 steps
        if total_train_step % 100 == 0:
            end_time = time.time()
            print('Elapsed time: {}'.format(end_time - start_time))
            print('Step: {}, Loss: {}'.format(total_train_step, loss.item()))
            writer.add_scalar('train_loss', loss.item(), total_train_step)

    # evaluation at the end of each epoch
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            output = han(imgs)
            loss = loss_fn(output, targets)
            total_test_loss += loss.item()
            accuracy = (output.argmax(1) == targets).sum()
            total_accuracy += accuracy
    print('Total loss on the test set: {}'.format(total_test_loss))
    print('Accuracy on the test set: {}'.format(total_accuracy / test_data_size))
    writer.add_scalar('test_loss', total_test_loss, total_test_step)
    writer.add_scalar('test_accuracy', total_accuracy / test_data_size, total_test_step)
    total_test_step += 1

# save the trained model
torch.save(han, 'Han_{}_GPU.pth'.format(epoch))
print('Model saved')

writer.close()
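Note that torch.save(han, ...) serializes the entire model object, which is why the test script below has to re-declare the Han class before loading it. A common alternative (just a sketch, not what this post uses) is to save only the parameters:

# save only the weights
torch.save(han.state_dict(), 'Han_{}_GPU_state.pth'.format(epoch))
# load them back into a freshly constructed model
model = Han()
model.load_state_dict(torch.load('Han_{}_GPU_state.pth'.format(epoch)))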
3. Training the model
Run the training script directly and enter the desired number of epochs when prompted.
After training, two new files appear: images.csv (the table mapping image paths to labels) and Han_{epochs}_GPU.pth (the trained model).
4. Implementing the test / inference code
import torch
import torchvision
from PIL import Image
from torch import nn

# path of the single image to test
image_path = input('Enter the image path: ')
image = Image.open(image_path)
# png images have 4 channels, so convert to 3-channel RGB
image = image.convert('RGB')

# transform the image into the format the model expects
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((32, 32)),
    torchvision.transforms.ToTensor()
])
image = transform(image)

# the model class must match the one used for training
class Han(nn.Module):
    def __init__(self):
        super(Han, self).__init__()
        self.model = nn.Sequential(
            # padding is derived from the model structure and the convolution output-size formula
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(in_features=64*4*4, out_features=64),
            nn.Linear(in_features=64, out_features=2)
        )

    def forward(self, input):
        input = self.model(input)
        return input

# load the trained model
model = torch.load('Han_100_GPU.pth')

# reshape the 3-D tensor into a 4-D batch of size 1 and move it to the GPU
image = torch.reshape(image, (1, 3, 32, 32))
image = image.cuda()

# inference only: eval mode and no gradient tracking,
# so the trained parameters are not modified
model.eval()
with torch.no_grad():
    output = model(image)

# translate the numeric label back into a human-readable class name
idx_to_class = ['ants', 'bees']
print('It is', idx_to_class[output.argmax(1).item()], '!')
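The model above was saved on the GPU. If you need to run inference on a CPU-only machine, torch.load accepts a map_location argument, and the .cuda() call should be dropped (a sketch, not part of the original script):

model = torch.load('Han_100_GPU.pth', map_location=torch.device('cpu'))
image = torch.reshape(image, (1, 3, 32, 32))  # no .cuda() here
model.eval()
with torch.no_grad():
    output = model(image)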
5. Visualization
Open a terminal, run tensorboard --logdir={log directory}, and click the link it prints to view the training curves in your browser.
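With the SummaryWriter configured above, the concrete command is:

tensorboard --logdir=train_logs_1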
6. Testing / using the model
Search your browser for a picture of a bee or an ant and take a screenshot of it to use as input.
Enter the image path (note: without surrounding quotation marks), and the model identifies it as bees!