import argparse
from pathlib import Path
from typing import Tuple, List, Mapping
import torch
import torch.nn as nn
from tqdm import trange
import config
from data_load import load_vocab, load_dataset


def create_dataset(seqs: List[List[str]],
                   tags: List[List[str]],
                   word_to_ix: Mapping[str, int],
                   max_seq_len: int,
                   pad_ix: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert tokenized sequences (List[List[str]]) into padded tensors.

    Returns:
        seqs_tensor: shape=[num_seqs, max_seq_len].
        seqs_mask: shape=[num_seqs, max_seq_len], 1 for real tokens, 0 for padding.
        tags_tensor: shape=[num_seqs, max_seq_len].
    """
    assert len(seqs) == len(tags)
    num_seqs = len(seqs)
    # Initialize with the padding index; real token ids are filled in below.
    seqs_tensor = torch.ones(num_seqs, max_seq_len) * pad_ix
    seqs_mask = torch.zeros(num_seqs, max_seq_len)
    tags_tensor = torch.ones(num_seqs, max_seq_len) * pad_ix
    for i in trange(num_seqs):
        seqs_mask[i, : len(seqs[i])] = 1
        # Truncate sequences longer than max_seq_len; unknown tokens map to [UNK].
        for j, word in enumerate(seqs[i][:max_seq_len]):
            seqs_tensor[i, j] = word_to_ix.get(word, word_to_ix['[UNK]'])
        for j, tag in enumerate(tags[i][:max_seq_len]):
            tags_tensor[i, j] = word_to_ix.get(tag, word_to_ix['[UNK]'])
    return seqs_tensor.long(), seqs_mask, tags_tensor.long()
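

# Minimal usage sketch (a check helper added for illustration, not part of the
# original pipeline; the tiny vocabulary below is made up and never written to disk).
def _check_create_dataset() -> None:
    vocab = {'[PAD]': 0, '[UNK]': 1, '天': 2, '地': 3}
    seqs, mask, tags = create_dataset([['天', '地']], [['地', '天']], vocab,
                                      max_seq_len=4, pad_ix=vocab['[PAD]'])
    assert seqs.tolist() == [[2, 3, 0, 0]]          # token ids, padded with [PAD]
    assert mask.tolist() == [[1.0, 1.0, 0.0, 0.0]]  # 1 for real tokens, 0 for padding
    assert tags.tolist() == [[3, 2, 0, 0]]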


def save_dataset(seqs_tensor, seqs_mask, tags_tensor, path):
    path = Path(path)
    path.mkdir(parents=True, exist_ok=True)
    torch.save(seqs_tensor, path / 'seqs_tensor.pkl')
    torch.save(seqs_mask, path / 'seqs_mask.pkl')
    torch.save(tags_tensor, path / 'tags_tesnor.pkl')


def create_attention_mask(raw_mask: torch.Tensor) -> torch.Tensor:
    """Convert a 0/1 padding mask into an additive attention mask.

    Kept positions map to 0.0 and padded positions to -10000.0, so the result
    can be added to attention scores before the softmax (BERT-style masking).
    """
    extended_attention_mask = raw_mask.unsqueeze(1).unsqueeze(2)
    extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
    return extended_attention_mask.float()
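

# Minimal sanity check (a sketch): kept positions become 0.0 and padded positions
# become -10000.0, ready to be broadcast against attention scores of shape
# [batch, heads, query_len, key_len].
def _check_attention_mask() -> None:
    ext = create_attention_mask(torch.tensor([[1, 1, 0]]))
    assert ext.shape == (1, 1, 1, 3)
    assert ext[0, 0, 0, 0].item() == 0.0       # attended position
    assert ext[0, 0, 0, 2].item() == -10000.0  # padded position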


def create_transformer_attention_mask(raw_mask: torch.Tensor) -> torch.Tensor:
    """Convert a 0/1 padding mask into a boolean mask where True marks padding,
    matching the `key_padding_mask` convention of torch.nn.Transformer layers.
    """
    return (1 - raw_mask).bool()
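

# Minimal sanity check (a sketch): True marks padded positions, which is what
# the `key_padding_mask` argument of torch.nn.Transformer layers expects.
def _check_transformer_attention_mask() -> None:
    key_padding_mask = create_transformer_attention_mask(torch.tensor([[1, 1, 0]]))
    assert key_padding_mask.dtype == torch.bool
    assert key_padding_mask.tolist() == [[False, False, True]]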


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # parser.add_argument("--dir", default='tensor_dataset', type=str)
    parser.add_argument("--max_len", default=32, type=int)
    args = parser.parse_args()

    vocab_path = f'{config.data_dir}/vocabs'
    word_to_ix = load_vocab(vocab_path)
    vocab_size = len(word_to_ix)
    max_seq_len = args.max_len

    # Training set (preprocessing currently commented out)
    # path = f'{config.data_dir}/tensor/train'
    # print('Preprocessing training set, save path: ' + path)
    # seq_path = f'{config.data_dir}/train/in.txt'   # upper lines of couplets (上联)
    # tag_path = f'{config.data_dir}/train/out.txt'  # lower lines of couplets (下联)
    # seqs, tags = load_dataset(seq_path, tag_path)
    # seqs, masks, tags = create_dataset(seqs, tags, word_to_ix, max_seq_len, word_to_ix['[PAD]'])
    # save_dataset(seqs, masks, tags, path)
    # print('Done')

    # Test set
    path = f'{config.data_dir}/tensor/test'
    print('Preprocessing test set, save path: ' + path)
    seq_path = f'{config.data_dir}/test/in.txt'   # upper lines of couplets (上联)
    tag_path = f'{config.data_dir}/test/out.txt'  # lower lines of couplets (下联)
    seqs, tags = load_dataset(seq_path, tag_path)
    seqs, masks, tags = create_dataset(seqs, tags, word_to_ix, max_seq_len, word_to_ix['[PAD]'])
    # save_dataset(seqs, masks, tags, path)
    print('Done')

    # Compute the maximum cross-entropy loss
    # Loss function: [PAD] positions are ignored
    loss_func = nn.CrossEntropyLoss(ignore_index=word_to_ix['[PAD]'])
    # Cross-entropy when every candidate character is predicted with equal probability
    logits = torch.full((32, vocab_size), 1 / vocab_size)
    i = 6
    output = torch.zeros((1, 32), dtype=torch.int64)
    seq = torch.arange(4, i + 4)
    output[0, 0: i] = seq
    loss = loss_func(logits, output.view(-1))
    print(loss)
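    # Sanity check (a sketch): CrossEntropyLoss applies log-softmax to its input,
    # and a constant input yields a uniform distribution, so every non-[PAD]
    # target contributes -log(1/vocab_size) = log(vocab_size) to the mean loss.
    uniform_ce = torch.log(torch.tensor(float(vocab_size)))
    print(f'log(vocab_size) = {uniform_ce.item():.4f}')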
    res = torch.tensor([loss])
    path = f'{config.data_dir}/tensor'
    path = Path(path)
    path.mkdir(parents=True, exist_ok=True)
    torch.save(res, path / 'max_entropy_tensor.pkl')