# mobilenetv2_fpn.py -- kangchi/pytorch2caffe
# Committed by wanglaotou on 2020-03-18: ssd_mobilenetv2_fpn

from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn import init

# Module-level counter used by the commented-out debug dump in
# LinearBottleneck.forward (the bare `global` below is a no-op at module level).
global count
count = 0

def _make_divisible(v, divisor, min_value=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8.
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    :param v:
    :param divisor:
    :param min_value:
    :return:
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

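# Worked example (illustrative, not from the original file): with divisor=8,
# _make_divisible(10, 8) first rounds to the nearest multiple, int(10 + 4) // 8 * 8 == 8,
# but 8 < 0.9 * 10, so one divisor is added back and the result is 16.
# _make_divisible(24, 8) returns 24 unchanged, since 24 is already a multiple of 8.
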
class LinearBottleneck(nn.Module):
    # def __init__(self, inplanes, outplanes, stride=1, t=6, activation=nn.ReLU6):
    def __init__(self, inplanes, outplanes, stride=1, t=6, activation=nn.ReLU):
        super(LinearBottleneck, self).__init__()
        # 1x1 pointwise expansion by factor t
        self.conv1 = nn.Conv2d(inplanes, inplanes * t, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(inplanes * t)
        # 3x3 depthwise convolution (groups == channels), carries the stride
        self.conv2 = nn.Conv2d(inplanes * t, inplanes * t, kernel_size=3, stride=stride, padding=1, bias=False,
                               groups=inplanes * t)
        self.bn2 = nn.BatchNorm2d(inplanes * t)
        # 1x1 pointwise linear projection (no activation after bn3)
        self.conv3 = nn.Conv2d(inplanes * t, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.activation = activation(inplace=True)
        self.stride = stride
        self.t = t
        self.inplanes = inplanes
        self.outplanes = outplanes

    def forward(self, x):
        residual = x
        # x = residual
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.activation(out)
        out = self.conv2(out)
        # x = out # -> ok
        out = self.bn2(out)
        out = self.activation(out)
        # x = out # -> ok
        out = self.conv3(out)
        # x = out # -> ok
        out = self.bn3(out)
        x = out  # -> not ok why???
        if self.stride == 1 and self.inplanes == self.outplanes:
            out += residual
            # x = out # -> ok
        # Debug dump of the block output, kept from the original but disabled:
        # print('out bottleneck_2:', x.shape)
        # pytorch_bottleneck_2 = open('pytorch_bottleneck_6_after1.txt', 'a')
        # x_md2_np = x.cpu().detach().numpy()
        # global count
        # count += 1
        # if count == 6:
        #     for i in range(x.shape[0]):
        #         for j in range(x.shape[1]):
        #             for m in range(x.shape[2]):
        #                 for n in range(x.shape[3]):
        #                     pytorch_bottleneck_2.write(str(x_md2_np[i][j][m][n]) + '\n')
        #                     # pytorch_bottleneck_2.write('\n')
        #     pytorch_bottleneck_2.write('=====================================\n')
        return out

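# Shape sanity check (a hypothetical sketch, not in the original file): the
# residual add only fires when stride == 1 and inplanes == outplanes.
#   block = LinearBottleneck(inplanes=32, outplanes=32, stride=1, t=6)
#   y = block(torch.randn(1, 32, 56, 56))   # (1, 32, 56, 56), residual added
#   down = LinearBottleneck(inplanes=32, outplanes=64, stride=2, t=6)
#   z = down(torch.randn(1, 32, 56, 56))    # (1, 64, 28, 28), no residual
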
class MobileNet2(nn.Module):
    """MobileNet2 implementation."""

    # def __init__(self, scale=1.0, input_size=224, t=6, in_channels=3, num_classes=1000, activation=nn.ReLU6):
    def __init__(self, scale=1.0, input_size=224, t=6, in_channels=3, num_classes=1000, activation=nn.ReLU):
        """
        MobileNet2 constructor.

        :param in_channels: (int, optional): number of channels in the input tensor.
            Default is 3 for RGB image inputs.
        :param input_size:
        :param num_classes: number of classes to predict. Default is 1000 for ImageNet.
        :param scale:
        :param t:
        :param activation:
        """
        super(MobileNet2, self).__init__()
        self.scale = scale
        self.t = t
        self.activation_type = activation
        self.activation = activation(inplace=True)
        self.num_classes = num_classes
        # self.num_of_channels = [32, 16, 24, 32, 64, 96, 160, 320]
        self.num_of_channels = [32, 16, 24, 32, 64, 128, 160, 320]
        # assert (input_size % 32 == 0)
        self.c = [_make_divisible(ch * self.scale, 8) for ch in self.num_of_channels]
        self.n = [1, 1, 2, 3, 4, 3, 3, 1]   # repeats per stage
        self.s = [2, 1, 2, 2, 2, 1, 2, 1]   # stride of the first block in each stage
        self.conv1 = nn.Conv2d(in_channels, self.c[0], kernel_size=3, bias=False, stride=self.s[0], padding=1)
        self.bn1 = nn.BatchNorm2d(self.c[0])
        self.bottlenecks = self._make_bottlenecks()

        # The original MobileNetV2 uses 1280 output channels here for scale <= 1;
        # this variant uses 128 instead.
        # self.last_conv_out_ch = 1280 if self.scale <= 1 else _make_divisible(1280 * self.scale, 8)
        self.last_conv_out_ch = 128 if self.scale <= 1 else _make_divisible(128 * self.scale, 8)
        self.conv_last = nn.Conv2d(self.c[-1], self.last_conv_out_ch, kernel_size=1, bias=False)
        self.bn_last = nn.BatchNorm2d(self.last_conv_out_ch)
        # self.avgpool = nn.AdaptiveAvgPool2d(1)
        # self.dropout = nn.Dropout(p=0.2, inplace=True)  # confirmed by paper authors
        self.fc = nn.Linear(self.last_conv_out_ch, self.num_classes)
        self.init_params()

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def _make_stage(self, inplanes, outplanes, n, stride, t, stage):
        modules = OrderedDict()
        stage_name = "LinearBottleneck{}".format(stage)

        # First module is the only one utilizing stride
        first_module = LinearBottleneck(inplanes=inplanes, outplanes=outplanes, stride=stride, t=t,
                                        activation=self.activation_type)
        modules[stage_name + "_0"] = first_module

        # Add more LinearBottlenecks depending on the number of repeats.
        # The original hardcoded t=6 here; t is passed through instead so the
        # expansion factor stays consistent with the caller (identical behavior
        # for the default t=6 configuration).
        for i in range(n - 1):
            name = stage_name + "_{}".format(i + 1)
            module = LinearBottleneck(inplanes=outplanes, outplanes=outplanes, stride=1, t=t,
                                      activation=self.activation_type)
            modules[name] = module
        return nn.Sequential(modules)

    def _make_bottlenecks(self):
        modules = OrderedDict()
        stage_name = "Bottlenecks"

        # First stage is the only one with t=1
        bottleneck1 = self._make_stage(inplanes=self.c[0], outplanes=self.c[1], n=self.n[1], stride=self.s[1], t=1,
                                       stage=0)
        modules[stage_name + "_0"] = bottleneck1

        # Add the remaining stages, one per channel configuration.
        for i in range(1, len(self.c) - 1):
            name = stage_name + "_{}".format(i)
            module = self._make_stage(inplanes=self.c[i], outplanes=self.c[i + 1], n=self.n[i + 1],
                                      stride=self.s[i + 1], t=self.t, stage=i)
            modules[name] = module
        return nn.Sequential(modules)

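    # Naming sketch (derived from the registration code above; useful when
    # matching layers during a PyTorch-to-Caffe conversion): stages are named
    # "Bottlenecks_{i}" and blocks inside stage i "LinearBottleneck{i}_{j}", so
    # the first depthwise conv sits at bottlenecks.Bottlenecks_0.LinearBottleneck0_0.conv2.
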
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.activation(x)
        x = self.bottlenecks(x)
        x = self.conv_last(x)
        x = self.bn_last(x)
        x = self.activation(x)
        # The classification head below is kept from the original but disabled;
        # the truncated network returns the final feature map instead.
        return x

        # average pooling layer
        # x = self.avgpool(x)
        # x = self.dropout(x)
        # flatten for input to fully-connected layer
        # x = x.view(x.size(0), -1)
        # x = self.fc(x)
        # return F.log_softmax(x, dim=1)  # TODO not needed(?)

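# Usage sketch (assumes a 300x300 input, matching the test block below): with
# scale=1.0 the five stride-2 layers reduce 300 -> 10, so the output feature
# map should be about (N, 128, 10, 10).
#   net = MobileNet2(scale=1.0).eval()
#   with torch.no_grad():
#       feat = net(torch.randn(1, 3, 300, 300))
#   print(feat.shape)  # expected torch.Size([1, 128, 10, 10])
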
if __name__ == "__main__":
    """Testing"""
    mobilenet = MobileNet2(scale=1.0)
    mobilenet = nn.DataParallel(mobilenet)
    # torch.backends.cudnn.benchmark = True
    weight = torch.load("weights/mobilenetv2_1.pth", map_location="cuda:0")
    mobilenet.load_state_dict(weight['state_dict'])

    # x = torch.zeros(32, 3, 300, 300)
    # x = mobilenet.conv1(x)
    # # print(x.size())
    # x = mobilenet.bn1(x)
    # # print(x.size())
    # x = mobilenet.activation(x)
    # # print(x.size())
    # for i in mobilenet.bottlenecks:
    #     x = i(x)
    #     print(x.size())
    # x = mobilenet.conv_last(x)
    # x = mobilenet.bn_last(x)
    # x = mobilenet.activation(x)
    # print(x.size())
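
    # CPU-only alternative (a hedged sketch; same checkpoint path as above):
    # weight = torch.load("weights/mobilenetv2_1.pth", map_location="cpu")
    # mobilenet.load_state_dict(weight['state_dict'])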