1 Star 0 Fork 0

zhoub86/wireless_dl

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
system.py 6.29 KB
一键复制 编辑 原始数据 按行查看 历史
gouthamvgk 提交于 2019-04-11 01:55 . update
import torch
import torch.nn as nn
import numpy as np
from torch import optim
import torch.nn.functional as F
import random as rn
import matplotlib.pyplot as plt
from noise import GaussianNoise, Noise_1, Noise_2
import os
class comm_4_1(nn.Module):
    """End-to-end autoencoder for a small (e.g. 4-symbol) communication system.

    Transmitter: lin1 -> ReLU -> lin2 -> ReLU -> lin3 -> tanh maps a one-hot
    symbol to a 2-D point, which ``lin_c`` expands to ``num_chan`` channel
    uses (2 real values each), batch-normalized and tanh-squashed to bound
    the transmitted amplitudes.
    Channel: additive Gaussian noise (``GaussianNoise``) whose std is derived
    from the training SNR and rate.
    Receiver: lin4 -> ReLU -> lin5 -> ReLU -> lin6 produces unnormalized
    scores (logits) over the ``num_sym`` symbols.
    """

    def __init__(self, num_sym, num_chan, rate, batch_size=200, train_snr=7, hidden_neurons=50):
        """
        Args:
            num_sym: number of distinct messages (input/output dimension).
            num_chan: channel uses; each contributes 2 real values.
            rate: communication rate, scales the noise power.
            batch_size: batch size the noise layer is shaped for.
            train_snr: training SNR in dB.
            hidden_neurons: width of the hidden layers.
        """
        super(comm_4_1, self).__init__()
        self.num_symbols = num_sym
        self.num_channels = num_chan
        self.Ebno = 10.0 ** (train_snr / 10.0)  # dB -> linear Eb/N0
        # Noise standard deviation: sigma = sqrt(1 / (2 * Eb/N0 * rate)).
        self.std_dev = np.sqrt(1 / (2 * self.Ebno * rate))
        # Transmitter layers.
        self.lin1 = nn.Linear(self.num_symbols, self.num_symbols)
        self.lin2 = nn.Linear(self.num_symbols, hidden_neurons)
        self.lin3 = nn.Linear(hidden_neurons, 2)
        self.lin_c = nn.Linear(2, self.num_channels * 2)
        self.norm1 = nn.BatchNorm1d(self.num_channels * 2)
        # Channel model (AWGN).
        self.noise = GaussianNoise((batch_size, self.num_channels * 2), std=self.std_dev)
        # Receiver layers.
        self.lin4 = nn.Linear(self.num_channels * 2, hidden_neurons)
        self.lin5 = nn.Linear(hidden_neurons, self.num_symbols)
        self.lin6 = nn.Linear(self.num_symbols, self.num_symbols)

    def forward(self, inp):
        """Encode ``inp``, pass it through the noisy channel, and decode.

        Returns logits over the symbol set (no softmax applied; pair with
        a loss such as ``nn.CrossEntropyLoss``).
        """
        out = F.relu(self.lin1(inp))
        out = F.relu(self.lin2(out))
        out = torch.tanh(self.lin3(out))  # torch.tanh: F.tanh is deprecated
        out = self.lin_c(out)
        out = torch.tanh(self.norm1(out))
        chan_out = self.noise(out)
        rec_out = self.lin4(chan_out)
        # Bug fix: the original assigned this activation to an unused
        # variable (`chan_out`), so lin5 received pre-activation values.
        rec_out = F.relu(rec_out)
        rec_out = F.relu(self.lin5(rec_out))
        return self.lin6(rec_out)
class comm_4_2(nn.Module):
    """Autoencoder communication system identical in architecture to
    ``comm_4_1`` but trained against the ``Noise_1`` channel model instead
    of plain AWGN.

    Transmitter: lin1 -> ReLU -> lin2 -> ReLU -> lin3 -> tanh, then ``lin_c``
    expands the 2-D point to ``num_chan`` channel uses (2 reals each),
    batch-normalized and tanh-squashed.
    Receiver: lin4 -> ReLU -> lin5 -> ReLU -> lin6 yields symbol logits.
    """

    def __init__(self, num_sym, num_chan, rate, batch_size=200, train_snr=7, hidden_neurons=50):
        """
        Args:
            num_sym: number of distinct messages (input/output dimension).
            num_chan: channel uses; each contributes 2 real values.
            rate: communication rate, scales the noise power.
            batch_size: batch size the noise layer is shaped for.
            train_snr: training SNR in dB.
            hidden_neurons: width of the hidden layers.
        """
        super(comm_4_2, self).__init__()
        self.num_symbols = num_sym
        self.num_channels = num_chan
        self.Ebno = 10.0 ** (train_snr / 10.0)  # dB -> linear Eb/N0
        # Noise standard deviation: sigma = sqrt(1 / (2 * Eb/N0 * rate)).
        self.std_dev = np.sqrt(1 / (2 * self.Ebno * rate))
        # Transmitter layers.
        self.lin1 = nn.Linear(self.num_symbols, self.num_symbols)
        self.lin2 = nn.Linear(self.num_symbols, hidden_neurons)
        self.lin3 = nn.Linear(hidden_neurons, 2)
        self.lin_c = nn.Linear(2, self.num_channels * 2)
        self.norm1 = nn.BatchNorm1d(self.num_channels * 2)
        # Channel model: project-defined Noise_1.
        self.noise = Noise_1((batch_size, self.num_channels * 2), std=self.std_dev)
        # Receiver layers.
        self.lin4 = nn.Linear(self.num_channels * 2, hidden_neurons)
        self.lin5 = nn.Linear(hidden_neurons, self.num_symbols)
        self.lin6 = nn.Linear(self.num_symbols, self.num_symbols)

    def forward(self, inp):
        """Encode ``inp``, pass it through the noisy channel, and decode.

        Returns logits over the symbol set (no softmax applied).
        """
        out = F.relu(self.lin1(inp))
        out = F.relu(self.lin2(out))
        out = torch.tanh(self.lin3(out))  # torch.tanh: F.tanh is deprecated
        out = self.lin_c(out)
        out = torch.tanh(self.norm1(out))
        chan_out = self.noise(out)
        rec_out = self.lin4(chan_out)
        # Bug fix: the original assigned this activation to an unused
        # variable (`chan_out`), so lin5 received pre-activation values.
        rec_out = F.relu(rec_out)
        rec_out = F.relu(self.lin5(rec_out))
        return self.lin6(rec_out)
class comm_16_1(nn.Module):
    """Autoencoder for a larger (e.g. 16-symbol) communication system over
    an AWGN channel (``GaussianNoise``); uses tanh activations throughout
    (unlike the ReLU-based ``comm_4_*`` variants) and a wider hidden layer.

    Transmitter: lin1 -> tanh -> lin2 -> tanh -> lin3 -> tanh, then ``lin_c``
    expands the 2-D point to ``num_chan`` channel uses (2 reals each),
    batch-normalized and tanh-squashed.
    Receiver: lin4 -> tanh -> lin5 -> tanh -> lin6 yields symbol logits.
    """

    def __init__(self, num_sym, num_chan, rate, batch_size=200, train_snr=7, hidden_neurons=100):
        """
        Args:
            num_sym: number of distinct messages (input/output dimension).
            num_chan: channel uses; each contributes 2 real values.
            rate: communication rate, scales the noise power.
            batch_size: batch size the noise layer is shaped for.
            train_snr: training SNR in dB.
            hidden_neurons: width of the hidden layers.
        """
        super(comm_16_1, self).__init__()
        self.num_symbols = num_sym
        self.num_channels = num_chan
        self.Ebno = 10.0 ** (train_snr / 10.0)  # dB -> linear Eb/N0
        # Noise standard deviation: sigma = sqrt(1 / (2 * Eb/N0 * rate)).
        self.std_dev = np.sqrt(1 / (2 * self.Ebno * rate))
        # Transmitter layers.
        self.lin1 = nn.Linear(self.num_symbols, self.num_symbols)
        self.lin2 = nn.Linear(self.num_symbols, hidden_neurons)
        self.lin3 = nn.Linear(hidden_neurons, 2)
        self.lin_c = nn.Linear(2, self.num_channels * 2)
        self.norm1 = nn.BatchNorm1d(self.num_channels * 2)
        # Channel model (AWGN).
        self.noise = GaussianNoise((batch_size, self.num_channels * 2), std=self.std_dev)
        # Receiver layers.
        self.lin4 = nn.Linear(self.num_channels * 2, hidden_neurons)
        self.lin5 = nn.Linear(hidden_neurons, self.num_symbols)
        self.lin6 = nn.Linear(self.num_symbols, self.num_symbols)

    def forward(self, inp):
        """Encode ``inp``, pass it through the noisy channel, and decode.

        Returns logits over the symbol set (no softmax applied).
        """
        out = torch.tanh(self.lin1(inp))  # torch.tanh: F.tanh is deprecated
        out = torch.tanh(self.lin2(out))
        out = torch.tanh(self.lin3(out))
        out = self.lin_c(out)
        out = torch.tanh(self.norm1(out))
        chan_out = self.noise(out)
        rec_out = self.lin4(chan_out)
        # Bug fix: the original assigned this activation to an unused
        # variable (`chan_out`), so lin5 received pre-activation values.
        rec_out = torch.tanh(rec_out)
        rec_out = torch.tanh(self.lin5(rec_out))
        return self.lin6(rec_out)
class comm_16_2(nn.Module):
def __init__(self, num_sym, num_chan, rate, batch_size = 200, train_snr = 7, hidden_neurons=100):
super(comm_16_2, self).__init__()
self.num_symbols = num_sym
self.num_channels = num_chan
self.flag = 0
self.Ebno = 10.0**(train_snr/10.0) #db eqivalent
self.std_dev = np.sqrt(1/(2*self.Ebno * rate))
self.lin1 = nn.Linear(self.num_symbols, self.num_symbols)
self.lin2 = nn.Linear(self.num_symbols, hidden_neurons)
self.lin3 = nn.Linear(hidden_neurons, 2)
self.lin_c = nn.Linear(2, self.num_channels*2)
self.norm1 = nn.BatchNorm1d(self.num_channels*2)
self.noise = Noise_2((batch_size, self.num_channels * 2), std = self.std_dev)
self.lin4 = nn.Linear(self.num_channels*2, hidden_neurons)
self.lin5 = nn.Linear(hidden_neurons, self.num_symbols)
self.lin6 = nn.Linear(self.num_symbols, self.num_symbols)
#self.softmax = nn.LogSoftmax(dim=1)
def forward(self, inp):
out = self.lin1(inp)
out = F.tanh(out)
out = self.lin2(out)
out = F.tanh(out)
out = self.lin3(out)
out = F.tanh(out)
out = self.lin_c(out)
out = self.norm1(out)
out = F.tanh(out)
chan_out = self.noise(out, self.flag%5)
self.flag += 1
rec_out = self.lin4(chan_out)
chan_out = F.tanh(rec_out)
rec_out = self.lin5(rec_out)
rec_out = F.tanh(rec_out)
rec_out = self.lin6(rec_out)
#rec_out = self.softmax(rec_out)
return rec_out
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
Python
1
https://gitee.com/zhoub86/wireless_dl.git
git@gitee.com:zhoub86/wireless_dl.git
zhoub86
wireless_dl
wireless_dl
master

搜索帮助

0d507c66 1850385 C8b1a773 1850385