import math

import numpy as np
from torch import nn

import excitability_modules as em
import utils

class fc_layer(nn.Module):
    '''Fully connected layer, with possibility of returning "pre-activations".

    Input:  [batch_size] x ... x [in_size] tensor
    Output: [batch_size] x ... x [out_size] tensor'''

    def __init__(self, in_size, out_size, nl=nn.ReLU(),
                 drop=0., bias=True, excitability=False, excit_buffer=False, batch_norm=False, gated=False):
        super().__init__()
        if drop > 0:
            self.dropout = nn.Dropout(drop)
        self.linear = em.LinearExcitability(in_size, out_size, bias=False if batch_norm else bias,
                                            excitability=excitability, excit_buffer=excit_buffer)
        if batch_norm:
            self.bn = nn.BatchNorm1d(out_size)
        if gated:
            self.gate = nn.Linear(in_size, out_size)
            self.sigmoid = nn.Sigmoid()
        if isinstance(nl, nn.Module):
            self.nl = nl
        elif not nl == "none":
            self.nl = nn.ReLU() if nl == "relu" else (nn.LeakyReLU() if nl == "leakyrelu" else utils.Identity())

    def forward(self, x, return_pa=False):
        input = self.dropout(x) if hasattr(self, 'dropout') else x
        pre_activ = self.bn(self.linear(input)) if hasattr(self, 'bn') else self.linear(input)
        gate = self.sigmoid(self.gate(x)) if hasattr(self, 'gate') else None
        gated_pre_activ = gate * pre_activ if hasattr(self, 'gate') else pre_activ
        output = self.nl(gated_pre_activ) if hasattr(self, 'nl') else gated_pre_activ
        return (output, gated_pre_activ) if return_pa else output

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        return [self.linear, self.gate] if hasattr(self, 'gate') else [self.linear]
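
# Usage sketch (illustrative only, not part of the original module; assumes `import torch`):
# a 784 -> 400 fully-connected layer with ReLU and 20% input dropout; with [return_pa]=True
# the forward pass also returns the (gated) pre-activations alongside the output.
#
#   layer = fc_layer(784, 400, nl="relu", drop=0.2)
#   out, pre_act = layer(torch.randn(128, 784), return_pa=True)
#   # out.shape == pre_act.shape == torch.Size([128, 400])
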

class fc_layer_split(nn.Module):
    '''Fully connected layer outputting [mean] and [logvar] for each unit.

    Input:  [batch_size] x ... x [in_size] tensor
    Output: tuple with two [batch_size] x ... x [out_size] tensors'''

    def __init__(self, in_size, out_size, nl_mean=nn.Sigmoid(), nl_logvar=nn.Hardtanh(min_val=-4.5, max_val=0.),
                 drop=0., bias=True, excitability=False, excit_buffer=False, batch_norm=False, gated=False):
        super().__init__()
        self.mean = fc_layer(in_size, out_size, drop=drop, bias=bias, excitability=excitability,
                             excit_buffer=excit_buffer, batch_norm=batch_norm, gated=gated, nl=nl_mean)
        self.logvar = fc_layer(in_size, out_size, drop=drop, bias=False, excitability=excitability,
                               excit_buffer=excit_buffer, batch_norm=batch_norm, gated=gated, nl=nl_logvar)

    def forward(self, x):
        return (self.mean(x), self.logvar(x))

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        list = []
        list += self.mean.list_init_layers()
        list += self.logvar.list_init_layers()
        return list
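
# Usage sketch (illustrative only; assumes `import torch`): a split layer producing, per output unit,
# a sigmoid-bounded [mean] and a hardtanh-clamped [logvar] in [-4.5, 0], e.g. as the output layer
# of a decoder:
#
#   split = fc_layer_split(100, 784)
#   mu, logvar = split(torch.randn(32, 100))
#   # mu.shape == logvar.shape == torch.Size([32, 784])
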
#-----------------------------------------------------------------------------------------------------------#

class MLP(nn.Module):
    '''Module for a multi-layer perceptron (MLP).

    Input:  [batch_size] x ... x [size_per_layer[0]] tensor
    Output: (tuple of) [batch_size] x ... x [size_per_layer[-1]] tensor'''

    def __init__(self, input_size=1000, output_size=10, layers=2, hid_size=1000, hid_smooth=None, size_per_layer=None,
                 drop=0, batch_norm=True, nl="relu", bias=True, excitability=False, excit_buffer=False, gated=False,
                 output='normal'):
        '''sizes: 0th=[input], 1st=[hid_size], ..., 1st-to-last=[hid_smooth], last=[output].

        [input_size]      # of inputs
        [output_size]     # of units in final layer
        [layers]          # of layers
        [hid_size]        # of units in each hidden layer
        [hid_smooth]      if None, all hidden layers have [hid_size] units; else, # of units linearly in-/decreases
                            such that the final hidden layer has [hid_smooth] units (if only 1 hidden layer, it has
                            [hid_size] units)
        [size_per_layer]  None or <list> with, for each layer, its number of units (1st element = number of inputs)
                            --> overwrites [input_size], [output_size], [layers], [hid_size] and [hid_smooth]
        [drop]            <float>; dropout probability for each layer's inputs (fraction set to zero during training)
        [batch_norm]      <bool>; if True, batch-normalization is applied to each layer
        [nl]              <str>; type of non-linearity to be used (options: "relu", "leakyrelu", "none")
        [gated]           <bool>; if True, each linear layer has an additional learnable gate
        [output]          <str>; "normal" --> final layer is the same as all others
                                 "logistic" / "gaussian" --> final layer is split into [mean] and [logvar]
                                 any other value (e.g. "BCE") --> final layer has sigmoid non-linearity, no batch-norm'''
        super().__init__()
        self.output = output

        # get sizes of all layers
        if size_per_layer is None:
            hidden_sizes = []
            if layers > 1:
                if (hid_smooth is not None):
                    hidden_sizes = [int(x) for x in np.linspace(hid_size, hid_smooth, num=layers-1)]
                else:
                    hidden_sizes = [int(x) for x in np.repeat(hid_size, layers - 1)]
            size_per_layer = [input_size] + hidden_sizes + [output_size]
        self.layers = len(size_per_layer) - 1

        # set label for this module
        # -determine "non-default options"-label
        nd_label = "{drop}{bias}{exc}{bn}{nl}{gate}{out}".format(
            drop="" if drop==0 else "-drop{}".format(drop),
            bias="" if bias else "-noBias", exc="-exc" if excitability else "", bn="-bn" if batch_norm else "",
            nl="-lr" if nl=="leakyrelu" else "", gate="-gated" if gated else "",
            out="" if output=="normal" else "-{}".format(output),
        )
        # -set label
        self.label = "MLP({}{})".format(size_per_layer, nd_label) if self.layers > 0 else ""

        # set layers
        for lay_id in range(1, self.layers+1):
            # number of units of this layer's input and output
            in_size = size_per_layer[lay_id-1]
            out_size = size_per_layer[lay_id]
            # define and set the fully connected layer
            if lay_id==self.layers and output in ("logistic", "gaussian"):
                layer = fc_layer_split(
                    in_size, out_size, bias=bias, excitability=excitability, excit_buffer=excit_buffer, drop=drop,
                    batch_norm=False, gated=gated,
                    nl_mean=nn.Sigmoid() if output=="logistic" else utils.Identity(),
                    nl_logvar=nn.Hardtanh(min_val=-4.5, max_val=0.) if output=="logistic" else utils.Identity(),
                )
            else:
                layer = fc_layer(
                    in_size, out_size, bias=bias, excitability=excitability, excit_buffer=excit_buffer, drop=drop,
                    batch_norm=False if (lay_id==self.layers and not output=="normal") else batch_norm, gated=gated,
                    nl=nn.Sigmoid() if (lay_id==self.layers and not output=="normal") else nl,
                )
            setattr(self, 'fcLayer{}'.format(lay_id), layer)

        # if no layers, add "identity"-module to indicate in this module's representation nothing happens
        if self.layers < 1:
            self.noLayers = utils.Identity()

    def forward(self, x):
        for lay_id in range(1, self.layers+1):
            x = getattr(self, 'fcLayer{}'.format(lay_id))(x)
        return x

    @property
    def name(self):
        return self.label

    def list_init_layers(self):
        '''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
        list = []
        for layer_id in range(1, self.layers+1):
            list += getattr(self, 'fcLayer{}'.format(layer_id)).list_init_layers()
        return list
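
# Usage sketch (illustrative only; assumes `import torch`): a 3-layer MLP (784 -> 400 -> 400 -> 10)
# with batch-norm and ReLU in the hidden layers; the same network could be specified with
# size_per_layer=[784, 400, 400, 10].
#
#   mlp = MLP(input_size=784, output_size=10, layers=3, hid_size=400)
#   scores = mlp(torch.randn(64, 784))   # -> shape [64, 10]
#   print(mlp.name)                      # e.g. "MLP([784, 400, 400, 10]-bn)"
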

class VGG(nn.Module):
    '''VGG-style convolutional network: a [features] extractor followed by a 3-layer classifier.

    Note: the classifier expects the extractor's output to flatten to 512 features
    (e.g., 32x32 inputs passed through the 5 max-pool stages of [cfg] below).'''

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He-style initialization for conv layers (std = sqrt(2 / fan_out))
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
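
# Illustrative example (not part of the original module): with batch_norm=True, the configuration
# [64, 'M'] would expand to
#   Conv2d(3, 64, kernel_size=3, padding=1) -> BatchNorm2d(64) -> ReLU -> MaxPool2d(2, 2)
#
#   features = make_layers([64, 'M'], batch_norm=True)
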

# VGG-16 configuration: 13 convolutional layers (numbers = output channels), 'M' = 2x2 max-pooling
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']


def vgg16(class_num):
    print('*************num of classes in linear_nets: ' + str(class_num))
    model = VGG(make_layers(cfg, batch_norm=True), num_classes=class_num)
    return model
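
if __name__ == "__main__":
    # Minimal sanity check (illustrative sketch, not part of the original file): with the five
    # max-pool stages in [cfg], a 3x32x32 input (e.g. CIFAR) is reduced to 512x1x1, which matches
    # the 512-unit input of the classifier. `class_num=100` is an arbitrary example value.
    import torch
    net = vgg16(class_num=100)
    net.eval()
    with torch.no_grad():
        out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 100])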