berryz2007/Transformer Demo

forked from Hauk Zero/Transformer Demo 
This repository does not declare an open-source license file (LICENSE). Before using it, check the project description and the upstream dependencies of its code.
enc.py (1.40 KB)
Hauk Zero committed on 2024-07-26 17:22 · add all
import utils
from torch import nn
from pe import PositionEncoding
from mha import MultiHeadAttention
from ffn import FFN


class EncoderLayer(nn.Module):
    def __init__(self, d_model, d_k, n_head, d_ff, dropout=0.5, device='cpu'):
        super().__init__()
        # Self-attention sub-layer; keys and values share the same dimension d_k
        self.mha = MultiHeadAttention(d_model, d_k, d_k, n_head, device)
        # Position-wise feed-forward sub-layer
        self.ffn = FFN(d_model, d_ff, dropout, device)

    def forward(self, x, mask):
        # x: (batch_size, n_seq, d_model)
        x = self.mha(x, x, x, mask)
        return self.ffn(x)
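
The MultiHeadAttention and FFN sub-layers used above come from mha.py and ffn.py, which are not shown on this page. As a rough guide to what EncoderLayer expects, below is a minimal sketch of a position-wise feed-forward sub-layer matching the FFN(d_model, d_ff, dropout, device) call signature; its internals (two linear layers with ReLU, dropout, a residual connection and LayerNorm) are assumed from the standard Transformer, not taken from this repository's ffn.py.

    # Hypothetical sketch of the FFN sub-layer expected by EncoderLayer.
    # Only the constructor signature FFN(d_model, d_ff, dropout, device)
    # comes from enc.py; everything inside is an assumption based on the
    # standard Transformer feed-forward block, not this repository's code.
    from torch import nn

    class FFNSketch(nn.Module):
        def __init__(self, d_model, d_ff, dropout=0.5, device='cpu'):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(d_model, d_ff),
                nn.ReLU(),
                nn.Linear(d_ff, d_model),
                nn.Dropout(dropout),
            ).to(device)
            self.norm = nn.LayerNorm(d_model).to(device)

        def forward(self, x):
            # (batch_size, n_seq, d_model) -> same shape, with residual + LayerNorm
            return self.norm(x + self.net(x))

The file then defines the Encoder, which embeds the input tokens, adds positional encodings, and stacks n such layers:
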
class Encoder(nn.Module):
    def __init__(self, n, n_vocab, d_model,
                 d_k, n_head, d_ff, pad_token=0,
                 max_len=5000, dropout=0.5, device='cpu'):
        super().__init__()
        self.pad_token = pad_token
        self.device = device
        # Token embedding and position encoding
        self.embd = nn.Embedding(n_vocab, d_model).to(device)
        self.pe = PositionEncoding(d_model, max_len, dropout).to(device)
        # Stack of n identical encoder layers
        self.layers = nn.ModuleList([
            EncoderLayer(d_model, d_k, n_head, d_ff, dropout, device)
            for _ in range(n)
        ])

    def forward(self, x):
        # x: (batch_size, n_seq)
        # Build the padding mask from the raw token ids before embedding
        pad_mask = utils.get_pad_mask(x, x, self.pad_token).to(self.device)
        mask = utils.bool_mask(pad_mask)
        # x: (batch_size, n_seq, d_model)
        x = self.embd(x)
        x = self.pe(x)
        for layer in self.layers:
            x = layer(x, mask)
        return x
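
For context, here is an illustrative way to drive the Encoder, assuming the sibling modules of this repository (pe.py, mha.py, ffn.py, utils.py) are on the import path and that utils.get_pad_mask / utils.bool_mask produce a (batch_size, n_seq, n_seq) mask flagging padding positions. The hyperparameter values and the dummy batch below are invented for the example, not taken from the repository.

    # Illustrative usage sketch; hyperparameters and the dummy batch are
    # made up for the example and are not part of this repository.
    import torch
    from enc import Encoder

    encoder = Encoder(n=6, n_vocab=10000, d_model=512,
                      d_k=64, n_head=8, d_ff=2048,
                      pad_token=0, max_len=5000, dropout=0.1, device='cpu')

    # Two sequences of length 10, right-padded with token id 0.
    tokens = torch.tensor([[5, 8, 42, 7, 11, 0, 0, 0, 0, 0],
                           [3, 19, 23, 88, 91, 4, 2, 0, 0, 0]])

    out = encoder(tokens)
    print(out.shape)  # expected: torch.Size([2, 10, 512])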