"""
将 优化器 用于网络训练(调优)
torch.optim.SGD :
"""
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, ReLU, Sigmoid, Linear, Flatten, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# TensorBoard writer; event files are written under ./logs/017
writer = SummaryWriter("logs/017")
# CIFAR-10 test split, converted to tensors; downloaded to ./visionData on first run
dataset = torchvision.datasets.CIFAR10(root="./visionData", train=False, transform=torchvision.transforms.ToTensor(),
download=True)
# batch_size=1: one image per optimization step (slow, but simple for the demo)
dataloader = DataLoader(dataset, batch_size=1)
# Sequential makes the model definition more concise
class MyNnSe(nn.Module):
    """CIFAR-10 classifier: three conv+maxpool stages, then two linear layers.

    Input is a (N, 3, 32, 32) batch; output is (N, 10) class scores.
    """

    def __init__(self):
        super().__init__()
        # Spatial flow: 32x32 -> 16x16 -> 8x8 -> 4x4, so the flattened
        # feature vector is 64 * 4 * 4 = 1024 elements.
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        """Run the batch through the sequential stack and return logits."""
        return self.model1(x)
mySeNn = MyNnSe()
# Cross-entropy loss for 10-class classification (takes raw logits)
loss = nn.CrossEntropyLoss()
# Construct the optimizer: plain SGD over all model parameters, lr = 0.01
optim = torch.optim.SGD(mySeNn.parameters(), lr=0.01)
'''
The standard three-step training routine
(see https://blog.csdn.net/weixin_45072810/article/details/109687210):
# 1. zero the gradients first:                 optimizer.zero_grad()
# 2. back-propagate to compute each gradient:  loss.backward()
# 3. take one gradient-descent update step:    optimizer.step()
'''
# Train for 20 epochs and report the accumulated loss of each epoch.
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = mySeNn(imgs)
        result_loss = loss(outputs, targets)
        optim.zero_grad()       # zero the gradients first
        result_loss.backward()  # back-propagate to compute gradients
        optim.step()            # apply one SGD update
        # .item() extracts a plain float; accumulating the tensor itself
        # would keep every batch's autograd graph alive and leak memory.
        running_loss += result_loss.item()
    print(running_loss)
    # Log the per-epoch loss so the SummaryWriter is actually used.
    writer.add_scalar("epoch_loss", running_loss, epoch)
writer.close()