2 Star 1 Fork 0

一路向前/learn

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
12nn_optim.py 2.11 KB
一键复制 编辑 原始数据 按行查看 历史
LY 提交于 2024-09-21 18:46 . 网络构建,损失函数,优化器
import torch.optim
import torchvision.datasets
from torch.nn import CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch import nn
# CIFAR-10 test split, converted to tensors; downloaded on first run.
to_tensor = torchvision.transforms.ToTensor()
dataset = torchvision.datasets.CIFAR10(
    "./dataset",
    train=False,
    transform=to_tensor,
    download=True,
)
# batch_size=1: one image per step, matching the tutorial's original setup.
dataloader = DataLoader(dataset, batch_size=1)
class TD(nn.Module):
    """Small CNN classifier for CIFAR-10 (3x32x32 input, 10 logits out).

    Architecture: three (Conv 5x5, pad 2 -> MaxPool 2) stages, then
    Flatten -> Linear(1024, 64) -> Linear(64, 10).

    Each layer is defined individually and called explicitly in forward();
    the equivalent could also be expressed with a single nn.Sequential.
    """

    def __init__(self):
        super(TD, self).__init__()
        # Spatial size halves at each pool: 32 -> 16 -> 8 -> 4.
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = MaxPool2d(2)
        # NOTE: renamed from the original misspelled "flaten"; this changes
        # state_dict keys if old checkpoints exist.
        self.flatten = Flatten()
        # 64 channels * 4 * 4 spatial = 1024 flattened features.
        self.linear1 = Linear(1024, 64)
        self.linear2 = Linear(64, 10)

    def forward(self, x):
        """Map a (N, 3, 32, 32) image batch to (N, 10) class logits."""
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.maxpool3(x)
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.linear2(x)
        return x
# Training setup: cross-entropy loss and plain SGD over the model parameters.
loss = CrossEntropyLoss()
td = TD()
optim = torch.optim.SGD(td.parameters(), lr=0.01)

for epoch in range(20):
    # Accumulate the loss as a plain Python float. The original used
    # `running_loss += res_loss`, which builds a chain of tensor objects
    # and prints `tensor(...)`; .item() detaches to a number.
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = td(imgs)
        res_loss = loss(outputs, targets)
        # Standard step: clear old gradients, backprop, update weights.
        optim.zero_grad()
        res_loss.backward()
        optim.step()
        running_loss += res_loss.item()
    print(running_loss)
Loading...
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/li-zen/learn.git
git@gitee.com:li-zen/learn.git
li-zen
learn
learn
master

搜索帮助