import os.path as osp
import fcn
import torch.nn as nn
import torch
import numpy as np
def get_upsampling_weight(in_channels, out_channels, kernel_size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
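

# Illustrative check: for kernel_size=4 the per-axis weights are
# [0.25, 0.75, 0.75, 0.25] and filt is their outer product, so a stride-2
# ConvTranspose2d initialised with this tensor performs (approximately)
# bilinear 2x upsampling.  The diagonal assignment above assumes
# in_channels == out_channels, which holds for the class-wise upsampling
# layers defined below.
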
class FCN8s(nn.Module):
pretrained_model = \
osp.expanduser('~/data/models/pytorch/fcn8s_from_caffe.pth')
@classmethod
def download(cls):
return fcn.data.cached_download(
url='http://drive.google.com/uc?id=0B9P1L--7Wd2vT0FtdThWREhjNkU',
path=cls.pretrained_model,
md5='dbd9bbb3829a3184913bccc74373afbb',
)
def __init__(self, n_class=21):
super(FCN8s, self).__init__()
# conv1
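        # conv1_1 uses padding=100 (the standard FCN trick) so that arbitrary
        # input sizes survive the 7x7 fc6 convolution; the fixed crops in
        # forward() (offsets 5, 9 and 31) remove the extra border again.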
self.conv1_1 = nn.Conv2d(3, 64, 3, padding=100)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/2
# conv2
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4
# conv3
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/8
# conv4
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/16
# conv5
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/32
# fc6
self.fc6 = nn.Conv2d(512, 4096, 7)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
# fc7
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.score_pool3 = nn.Conv2d(256, n_class, 1)
self.score_pool4 = nn.Conv2d(512, n_class, 1)
self.upscore2 = nn.ConvTranspose2d(
n_class, n_class, 4, stride=2, bias=False)
self.upscore8 = nn.ConvTranspose2d(
n_class, n_class, 16, stride=8, bias=False)
self.upscore_pool4 = nn.ConvTranspose2d(
n_class, n_class, 4, stride=2, bias=False)
self._initialize_weights()
def _initialize_weights(self):
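        # All Conv2d weights start at zero; the backbone convolutions are
        # expected to be filled in afterwards via the copy_params_from_*
        # helpers, so only the score layers stay zero-initialised.
        # ConvTranspose2d layers are initialised as fixed bilinear upsamplers.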
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.zero_()
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
assert m.kernel_size[0] == m.kernel_size[1]
initial_weight = get_upsampling_weight(
m.in_channels, m.out_channels, m.kernel_size[0])
m.weight.data.copy_(initial_weight)
def forward(self, x):
h = x
h = self.relu1_1(self.conv1_1(h))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
pool3 = h # 1/8
h = self.relu4_1(self.conv4_1(h))
h = self.relu4_2(self.conv4_2(h))
h = self.relu4_3(self.conv4_3(h))
h = self.pool4(h)
pool4 = h # 1/16
h = self.relu5_1(self.conv5_1(h))
h = self.relu5_2(self.conv5_2(h))
h = self.relu5_3(self.conv5_3(h))
h = self.pool5(h)
h = self.relu6(self.fc6(h))
h = self.drop6(h)
h = self.relu7(self.fc7(h))
h = self.drop7(h)
h = self.score_fr(h)
h = self.upscore2(h)
upscore2 = h # 1/16
h = self.score_pool4(pool4)
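        # crop score_pool4 to the spatial size of upscore2; the offset (5)
        # follows the original Caffe FCN-8s crop layer and compensates for the
        # conv1_1 padding=100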
h = h[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]]
score_pool4c = h # 1/16
h = upscore2 + score_pool4c # 1/16
h = self.upscore_pool4(h)
upscore_pool4 = h # 1/8
h = self.score_pool3(pool3)
h = h[:, :,
9:9 + upscore_pool4.size()[2],
9:9 + upscore_pool4.size()[3]]
score_pool3c = h # 1/8
h = upscore_pool4 + score_pool3c # 1/8
h = self.upscore8(h)
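        # the 8x upsampled map is larger than the input because of the
        # padding=100 trick; crop (offset 31) back to the input's spatial size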
h = h[:, :, 31:31 + x.size()[2], 31:31 + x.size()[3]].contiguous()
return h
def copy_params_from_fcn16s(self, fcn16s):
for name, l1 in fcn16s.named_children():
try:
l2 = getattr(self, name)
l2.weight # skip ReLU / Dropout
except Exception:
continue
assert l1.weight.size() == l2.weight.size()
l2.weight.data.copy_(l1.weight.data)
if l1.bias is not None:
assert l1.bias.size() == l2.bias.size()
l2.bias.data.copy_(l1.bias.data)
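

# Minimal usage sketch (illustrative, not part of the original file; assumes
# the cached weights are a plain state dict and that download() returns the
# local file path):
#
#   model = FCN8s(n_class=21)
#   model.load_state_dict(torch.load(FCN8s.download()))
#   model.eval()
#   with torch.no_grad():
#       logits = model(torch.zeros(1, 3, 500, 500))  # -> (1, 21, 500, 500)
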
class FCN8sAtOnce(FCN8s):
pretrained_model = \
osp.expanduser('~/data/models/pytorch/fcn8s-atonce_from_caffe.pth')
@classmethod
def download(cls):
return fcn.data.cached_download(
url='http://drive.google.com/uc?id=0B9P1L--7Wd2vblE1VUIxV1o2d2M',
path=cls.pretrained_model,
md5='bfed4437e941fef58932891217fe6464',
)
def forward(self, x):
h = x
h = self.relu1_1(self.conv1_1(h))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
pool3 = h # 1/8
h = self.relu4_1(self.conv4_1(h))
h = self.relu4_2(self.conv4_2(h))
h = self.relu4_3(self.conv4_3(h))
h = self.pool4(h)
pool4 = h # 1/16
h = self.relu5_1(self.conv5_1(h))
h = self.relu5_2(self.conv5_2(h))
h = self.relu5_3(self.conv5_3(h))
h = self.pool5(h)
h = self.relu6(self.fc6(h))
h = self.drop6(h)
h = self.relu7(self.fc7(h))
h = self.drop7(h)
h = self.score_fr(h)
h = self.upscore2(h)
upscore2 = h # 1/16
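        # "At-once" training variant: the pool4 and pool3 skip scores are
        # scaled by 0.01 and 0.0001 so that all stages can be trained jointly,
        # following the reference FCN implementation.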
h = self.score_pool4(pool4 * 0.01) # XXX: scaling to train at once
h = h[:, :, 5:5 + upscore2.size()[2], 5:5 + upscore2.size()[3]]
score_pool4c = h # 1/16
h = upscore2 + score_pool4c # 1/16
h = self.upscore_pool4(h)
upscore_pool4 = h # 1/8
h = self.score_pool3(pool3 * 0.0001) # XXX: scaling to train at once
h = h[:, :,
9:9 + upscore_pool4.size()[2],
9:9 + upscore_pool4.size()[3]]
score_pool3c = h # 1/8
h = upscore_pool4 + score_pool3c # 1/8
h = self.upscore8(h)
h = h[:, :, 31:31 + x.size()[2], 31:31 + x.size()[3]].contiguous()
return h
def copy_params_from_vgg16(self, vgg16):
features = [
self.conv1_1, self.relu1_1,
self.conv1_2, self.relu1_2,
self.pool1,
self.conv2_1, self.relu2_1,
self.conv2_2, self.relu2_2,
self.pool2,
self.conv3_1, self.relu3_1,
self.conv3_2, self.relu3_2,
self.conv3_3, self.relu3_3,
self.pool3,
self.conv4_1, self.relu4_1,
self.conv4_2, self.relu4_2,
self.conv4_3, self.relu4_3,
self.pool4,
self.conv5_1, self.relu5_1,
self.conv5_2, self.relu5_2,
self.conv5_3, self.relu5_3,
self.pool5,
]
for l1, l2 in zip(vgg16.features, features):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data.copy_(l1.weight.data)
l2.bias.data.copy_(l1.bias.data)
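        # VGG16's first two fully connected layers (classifier[0] and
        # classifier[3]) are reshaped into the equivalent 7x7 and 1x1
        # convolutions fc6 / fc7.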
for i, name in zip([0, 3], ['fc6', 'fc7']):
l1 = vgg16.classifier[i]
l2 = getattr(self, name)
l2.weight.data.copy_(l1.weight.data.view(l2.weight.size()))
l2.bias.data.copy_(l1.bias.data.view(l2.bias.size()))
class FCN32s(nn.Module):
pretrained_model = \
osp.expanduser('~/data/models/pytorch/fcn32s_from_caffe.pth')
@classmethod
def download(cls):
return fcn.data.cached_download(
url='http://drive.google.com/uc?id=0B9P1L--7Wd2vM2oya3k0Zlgtekk',
path=cls.pretrained_model,
md5='8acf386d722dc3484625964cbe2aba49',
)
def __init__(self, n_class=21, input_channels=3):
super(FCN32s, self).__init__()
# conv1
self.conv1_1 = nn.Conv2d(input_channels, 64, 3, padding=1)
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(64, 64, 3, padding=1)
self.relu1_2 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/2
# conv2
self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
self.relu2_1 = nn.ReLU(inplace=True)
self.conv2_2 = nn.Conv2d(128, 128, 3, padding=1)
self.relu2_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/4
# conv3
self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
self.relu3_1 = nn.ReLU(inplace=True)
self.conv3_2 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_2 = nn.ReLU(inplace=True)
self.conv3_3 = nn.Conv2d(256, 256, 3, padding=1)
self.relu3_3 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/8
# conv4
self.conv4_1 = nn.Conv2d(256, 512, 3, padding=1)
self.relu4_1 = nn.ReLU(inplace=True)
self.conv4_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_2 = nn.ReLU(inplace=True)
self.conv4_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu4_3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/16
# conv5
self.conv5_1 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_1 = nn.ReLU(inplace=True)
self.conv5_2 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_2 = nn.ReLU(inplace=True)
self.conv5_3 = nn.Conv2d(512, 512, 3, padding=1)
self.relu5_3 = nn.ReLU(inplace=True)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) # 1/32
# fc6
self.fc6 = nn.Conv2d(512, 4096, 3)
self.relu6 = nn.ReLU(inplace=True)
self.drop6 = nn.Dropout2d()
# fc7
self.fc7 = nn.Conv2d(4096, 4096, 1)
self.relu7 = nn.ReLU(inplace=True)
self.drop7 = nn.Dropout2d()
self.score_fr = nn.Conv2d(4096, n_class, 1)
self.upscore = nn.ConvTranspose2d(n_class, n_class, 64, stride=32,
bias=False)
        # fc_default: extra prediction head (fc1/fc2 + 8x upsampling) applied
        # to pool3 features; the modified forward() below uses this path
        # instead of fc6/fc7
self.fc1 = nn.Conv2d(256, 1024, 3, padding=1)
self.relu1 = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(1024, n_class, 1)
self.upscore_1 = nn.ConvTranspose2d(n_class, n_class, 16, stride=8,
bias=False)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.zero_()
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
assert m.kernel_size[0] == m.kernel_size[1]
initial_weight = get_upsampling_weight(
m.in_channels, m.out_channels, m.kernel_size[0])
m.weight.data.copy_(initial_weight)
def forward(self, x):
h = x
h = self.relu1_1(self.conv1_1(h))
h = self.relu1_2(self.conv1_2(h))
h = self.pool1(h)
h = self.relu2_1(self.conv2_1(h))
h = self.relu2_2(self.conv2_2(h))
h = self.pool2(h)
h = self.relu3_1(self.conv3_1(h))
h = self.relu3_2(self.conv3_2(h))
h = self.relu3_3(self.conv3_3(h))
h = self.pool3(h)
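        # The stock FCN-32s path (conv4/conv5, fc6/fc7, score_fr, 32x upscore)
        # is disabled below; this modified network predicts from pool3
        # features via the fc1/fc2 head and an 8x upsampling instead.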
# h = self.relu4_1(self.conv4_1(h))
# h = self.relu4_2(self.conv4_2(h))
# h = self.relu4_3(self.conv4_3(h))
# h = self.pool4(h)
# h = self.relu5_1(self.conv5_1(h))
# h = self.relu5_2(self.conv5_2(h))
# h = self.relu5_3(self.conv5_3(h))
# h = self.pool5(h)
# h = self.relu6(self.fc6(h))
# h = self.drop6(h)
#
# h = self.relu7(self.fc7(h))
# h = self.drop7(h)
#
# h = self.score_fr(h)
#
# h = self.upscore(h)
# h = h[:, :, 19:19 + x.size()[2], 19:19 + x.size()[3]].contiguous()
h = self.relu1(self.fc1(h))
h = self.fc2(h)
h = self.upscore_1(h)
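        # the stride-8 deconvolution output is slightly larger than the input;
        # crop (offset 4) back to the input's spatial size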
h = h[:, :, 4:4 + x.size()[2], 4:4 + x.size()[3]].contiguous()
return h
def copy_params_from_vgg16(self, vgg16):
features = [
self.conv1_1, self.relu1_1,
self.conv1_2, self.relu1_2,
self.pool1,
self.conv2_1, self.relu2_1,
self.conv2_2, self.relu2_2,
self.pool2,
self.conv3_1, self.relu3_1,
self.conv3_2, self.relu3_2,
self.conv3_3, self.relu3_3,
self.pool3,
self.conv4_1, self.relu4_1,
self.conv4_2, self.relu4_2,
self.conv4_3, self.relu4_3,
self.pool4,
self.conv5_1, self.relu5_1,
self.conv5_2, self.relu5_2,
self.conv5_3, self.relu5_3,
self.pool5,
]
for l1, l2 in zip(vgg16.features, features):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
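        # NOTE: the view() below assumes fc6 keeps the original 7x7 kernel
        # (4096 x 512 x 7 x 7 == 4096 x 25088); with the 3x3 fc6 defined in
        # __init__ above this reshape would fail if called, though that layer
        # is unused by the modified forward().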
for i, name in zip([0, 3], ['fc6', 'fc7']):
l1 = vgg16.classifier[i]
l2 = getattr(self, name)
l2.weight.data = l1.weight.data.view(l2.weight.size())
l2.bias.data = l1.bias.data.view(l2.bias.size())