Character-Level Text Generation with an LSTM in PyTorch

Table of contents
Preface
I. Dataset
II. Implementation
    1. Imports and LSTM model definition
    2. Data preprocessing function
    3. Training function
    4. Prediction function
    5. Text generation function
    6. Main function
Complete code
Summary

Preface
This post covers a deep-learning topic within machine learning: building an LSTM model in PyTorch for a character-level text-generation task.

I. Dataset
https://download.csdn.net/download/qq_52785473/78428834
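For orientation: the main() function later in the post reads the dataset with pandas and treats the first column as raw text. A minimal, hypothetical check of that assumed layout (the path and column index are taken from main(), not from the download page):

import pandas as pd

# Hypothetical sanity check of the layout assumed by main():
# a tab-separated file whose first column holds the text lines.
df = pd.read_csv("../datasets/dev.tsv", sep="\t", header=None)
print(df.shape)      # (number of rows, number of columns)
print(df[0].head())  # the text column used to build the character corpus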
II. Implementation

1. Imports and LSTM model definition
The code is as follows:
# coding: utf-8
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import torch.nn.functional as F


class lstm_model(nn.Module):
    def __init__(self, vocab, hidden_size, num_layers, dropout=0.5):
        super(lstm_model, self).__init__()
        self.vocab = vocab  # the character vocabulary
        # index <-> character mappings
        self.int_char = {i: char for i, char in enumerate(vocab)}
        self.char_int = {char: i for i, char in self.int_char.items()}
        # one-hot encoder for the characters
        self.encoder = OneHotEncoder(sparse=True).fit(vocab.reshape(-1, 1))
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM layer
        self.lstm = nn.LSTM(len(vocab), hidden_size, num_layers, batch_first=True, dropout=dropout)
        # fully connected output layer
        self.linear = nn.Linear(hidden_size, len(vocab))

    def forward(self, sequence, hs=None):
        out, hs = self.lstm(sequence, hs)  # LSTM output: (batch_size, sequence_length, hidden_size)
        out = out.reshape(-1, self.hidden_size)  # flatten to (batch_size * sequence_length, hidden_size) for the linear layer
        output = self.linear(out)  # linear output: (batch_size * sequence_length, vocab_size)
        return output, hs

    def onehot_encode(self, data):  # one-hot encode the data
        return self.encoder.transform(data)

    def onehot_decode(self, data):  # decode one-hot vectors back to characters
        return self.encoder.inverse_transform(data)

    def label_encode(self, data):  # map characters to integer labels
        return np.array([self.char_int[ch] for ch in data])

    def label_decode(self, data):  # map integer labels back to characters
        return np.array([self.int_char[ch] for ch in data])
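A minimal shape check of the model above, assuming a toy vocabulary and an sklearn version that still accepts OneHotEncoder(sparse=True); the sizes here are illustrative only:

import numpy as np
import torch

vocab = np.array(sorted(set("hello world")))   # toy vocabulary of 8 characters
model = lstm_model(vocab, hidden_size=64, num_layers=2)

x = torch.zeros(4, 10, len(vocab))             # dummy one-hot batch: (batch_size=4, seq_len=10, vocab_size)
out, hs = model(x)
print(out.shape)    # expected: (4 * 10, len(vocab))
print(hs[0].shape)  # h: (num_layers, batch_size, hidden_size)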
2. Data preprocessing function

def get_batches(data, batch_size, seq_len):
    """
    :param data: source data, shape (num_samples, num_features)
    :param batch_size: batch size
    :param seq_len: sequence length (number of time steps)
    :return: generator yielding arrays of shape (batch_size, seq_len, num_features)
    """
    num_features = data.shape[1]
    num_chars = batch_size * seq_len  # number of characters in one batch
    num_batches = int(np.floor(data.shape[0] / num_chars))  # how many full batches fit into the data
    need_chars = num_batches * num_chars  # total number of characters needed

    targets = np.vstack((data[1:].A, data[0].A))  # shift by one character, wrapping the first row to the end; .A converts the sparse matrix to an ndarray, which reshapes more easily
    inputs = data[:need_chars].A.astype(int)  # keep only the need_chars characters we can actually use
    targets = targets[:need_chars]

    targets = targets.reshape(batch_size, -1, num_features)
    inputs = inputs.reshape(batch_size, -1, num_features)

    for i in range(0, inputs.shape[1], seq_len):
        x = inputs[:, i: i + seq_len]
        y = targets[:, i: i + seq_len]
        yield x, y  # a generator keeps memory usage low
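A quick, illustrative way to check the generator's output shapes, assuming model and trainset exist as they do in main() and that the data has been one-hot encoded the way train() does it:

# Illustrative shape check; model and trainset are assumed to exist as in main().
encoded = model.onehot_encode(trainset.reshape(-1, 1))   # sparse matrix, one row per character
for x, y in get_batches(encoded, 128, 100):
    print(x.shape, y.shape)   # expected: (128, 100, vocab_size) for both
    break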
3. Training function

def train(model, data, batch_size, seq_len, epochs, lr=0.01, valid=None):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    if valid is not None:
        data = model.onehot_encode(data.reshape(-1, 1))
        valid = model.onehot_encode(valid.reshape(-1, 1))
    else:
        data = model.onehot_encode(data.reshape(-1, 1))

    train_loss = []
    val_loss = []

    for epoch in range(epochs):
        model.train()
        hs = None  # the LSTM hidden state (h, c)
        train_ls = 0.0
        val_ls = 0.0
        for x, y in get_batches(data, batch_size, seq_len):
            optimizer.zero_grad()
            x = torch.tensor(x).float().to(device)
            out, hs = model(x, hs)
            hs = tuple([h.data for h in hs])  # detach the hidden state from the previous graph
            y = y.reshape(-1, len(model.vocab))
            y = model.onehot_decode(y)
            y = model.label_encode(y.squeeze())
            y = torch.from_numpy(y).long().to(device)
            loss = criterion(out, y.squeeze())
            loss.backward()
            optimizer.step()
            train_ls += loss.item()

        if valid is not None:
            model.eval()
            hs = None
            with torch.no_grad():
                for x, y in get_batches(valid, batch_size, seq_len):
                    x = torch.tensor(x).float().to(device)  # one validation batch of batch_size * seq_len characters
                    out, hs = model(x, hs)
                    # out has shape (batch_size * seq_len, vocab_size)
                    hs = tuple([h.data for h in hs])  # carry the hidden state forward
                    y = y.reshape(-1, len(model.vocab))  # y is (128, 100, 43); flatten so each row is one character (43 = vocab size here)
                    y = model.onehot_decode(y)  # decode the one-hot targets back to characters (each target is the next character)
                    # at this point y is a nested array like [[..], [..]]
                    y = model.label_encode(y.squeeze())  # squeeze away a dimension so the characters can be mapped to integer labels
                    # now y is a 1-D array of vocabulary indices
                    y = torch.from_numpy(y).long().to(device)
                    # y and y.squeeze() are identical here, so the extra squeeze below is probably redundant
                    loss = criterion(out, y.squeeze())  # compute the validation loss
                    val_ls += loss.item()

            val_loss.append(np.mean(val_ls))
        train_loss.append(np.mean(train_ls))
        print("train_loss:", train_ls)

    plt.plot(train_loss, label="train_loss")
    plt.plot(val_loss, label="val loss")
    plt.title("loss vs epoch")
    plt.legend()
    plt.show()

    model_name = "lstm_model.net"
    with open(model_name, "wb") as f:  # save the model once training is done
        torch.save(model.state_dict(), f)
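The target handling in the loop above is just a roundabout way of turning one-hot rows back into the integer class indices that nn.CrossEntropyLoss expects. A toy, self-contained illustration of that round trip (the values are made up, and it assumes the same sklearn behaviour as the model class above):

import numpy as np

vocab = np.array(["a", "b", "c"])                  # toy vocabulary for illustration
m = lstm_model(vocab, hidden_size=8, num_layers=2)

y_onehot = np.array([[0, 1, 0], [0, 0, 1]])        # two one-hot targets: "b" and "c"
chars = m.onehot_decode(y_onehot)                  # back to characters, shape (2, 1)
labels = m.label_encode(chars.squeeze())           # integer class indices: [1, 2]
print(chars.ravel(), labels)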
4. Prediction function

def predict(model, char, top_k=None, hidden_size=None):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    model.eval()  # inference mode, weights stay fixed
    with torch.no_grad():
        char = np.array([char])  # a single input character; we predict the one that follows it
        char = char.reshape(-1, 1)  # make it 2-D to match what the encoder expects
        char_encoding = model.onehot_encode(char).A  # one-hot encode and convert the sparse result to an ndarray
        char_encoding = char_encoding.reshape(1, 1, -1)  # shape (1, 1, vocab_size); 3-D to match the model input
        char_tensor = torch.tensor(char_encoding, dtype=torch.float32)  # convert to a tensor
        char_tensor = char_tensor.to(device)

        out, hidden_size = model(char_tensor, hidden_size)  # run the model; out holds the logits

        probs = F.softmax(out, dim=1).squeeze()  # probabilities over every character in the vocabulary

        if top_k is None:
            indices = np.arange(len(model.vocab))  # consider the whole vocabulary
        else:
            probs, indices = probs.topk(top_k)  # keep only the top_k most likely characters
            indices = indices.cpu().numpy()
        probs = probs.cpu().numpy()

        char_index = np.random.choice(indices, p=probs/probs.sum())  # sample one character index as the prediction
        char = model.int_char[char_index]  # look up the predicted character

    return char, hidden_size
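The top-k step above truncates the softmax distribution, which is why the probabilities are renormalised with probs/probs.sum() before sampling. A standalone toy illustration of that idea (the logits are invented):

import numpy as np
import torch
import torch.nn.functional as F

logits = torch.tensor([2.0, 1.0, 0.5, -1.0])     # toy logits over a 4-character vocabulary
probs = F.softmax(logits, dim=0)
top_probs, top_idx = probs.topk(2)               # keep the 2 most likely characters
p = top_probs.numpy() / top_probs.numpy().sum()  # renormalise so the kept probabilities sum to 1
choice = np.random.choice(top_idx.numpy(), p=p)  # sample among the top-k indices
print(top_idx.numpy(), p, choice)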
5. Text generation function

def sample(model, length, top_k=None, sentence="c"):
    hidden_size = None
    new_sentence = [char for char in sentence]
    for i in range(length):
        next_char, hidden_size = predict(model, new_sentence[-1], top_k=top_k, hidden_size=hidden_size)
        new_sentence.append(next_char)
    return "".join(new_sentence)
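Note that predict() is only ever given new_sentence[-1], so only the last character of the seed sentence influences the first prediction; the earlier seed characters are copied into the output but never fed through the model. A hypothetical call, assuming the seed character exists in the vocabulary:

# Hypothetical usage with a trained model; "t" must be a character in the vocabulary.
generated = sample(model, 100, top_k=5, sentence="t")
print(generated)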
6. Main function

def main():
    hidden_size = 512
    num_layers = 2
    batch_size = 128
    seq_len = 100
    epochs = 2
    lr = 0.01

    f = pd.read_csv("../datasets/dev.tsv", sep="\t", header=None)
    f = f[0]
    text = list(f)
    text = ".".join(text)

    vocab = np.array(sorted(set(text)))  # build the character vocabulary
    vocab_size = len(vocab)

    val_len = int(np.floor(0.2 * len(text)))  # 80/20 train/validation split
    trainset = np.array(list(text[:-val_len]))
    validset = np.array(list(text[-val_len:]))

    model = lstm_model(vocab, hidden_size, num_layers)  # instantiate the model
    train(model, trainset, batch_size, seq_len, epochs, lr=lr, valid=validset)  # train the model
    model.load_state_dict(torch.load("lstm_model.net"))  # load the saved weights
    new_text = sample(model, 100, top_k=5)  # generate 100 characters, sampling from the 5 most likely at each step
    print(new_text)  # print the generated text


if __name__ == "__main__":
    main()

There is still plenty of room to improve this code: moving to word-level text generation, or introducing word embeddings such as word2vec, could both give the model noticeably better results.
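As a rough sketch of the embedding idea mentioned above (not part of the original code, and every name in it is an assumption), the one-hot input could be replaced by a learned nn.Embedding so that the LSTM consumes integer character indices directly:

import torch
import torch.nn as nn

class embedding_lstm(nn.Module):
    # Sketch only: character indices -> embedding -> LSTM -> vocabulary logits.
    def __init__(self, vocab_size, embed_dim=128, hidden_size=512, num_layers=2, dropout=0.5):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, idx, hs=None):  # idx: (batch_size, seq_len) integer character indices
        out, hs = self.lstm(self.embed(idx), hs)
        return self.linear(out.reshape(-1, out.shape[-1])), hs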
Complete code

# coding: utf-8
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import torch.nn.functional as F


class lstm_model(nn.Module):
    def __init__(self, vocab, hidden_size, num_layers, dropout=0.5):
        super(lstm_model, self).__init__()
        self.vocab = vocab  # the character vocabulary
        # index <-> character mappings
        self.int_char = {i: char for i, char in enumerate(vocab)}
        self.char_int = {char: i for i, char in self.int_char.items()}
        # one-hot encoder for the characters
        self.encoder = OneHotEncoder(sparse=True).fit(vocab.reshape(-1, 1))
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM layer
        self.lstm = nn.LSTM(len(vocab), hidden_size, num_layers, batch_first=True, dropout=dropout)
        # fully connected output layer
        self.linear = nn.Linear(hidden_size, len(vocab))

    def forward(self, sequence, hs=None):
        out, hs = self.lstm(sequence, hs)  # LSTM output: (batch_size, sequence_length, hidden_size)
        out = out.reshape(-1, self.hidden_size)  # flatten to (batch_size * sequence_length, hidden_size) for the linear layer
        output = self.linear(out)  # linear output: (batch_size * sequence_length, vocab_size)
        return output, hs

    def onehot_encode(self, data):
        return self.encoder.transform(data)

    def onehot_decode(self, data):
        return self.encoder.inverse_transform(data)

    def label_encode(self, data):
        return np.array([self.char_int[ch] for ch in data])

    def label_decode(self, data):
        return np.array([self.int_char[ch] for ch in data])


def get_batches(data, batch_size, seq_len):
    """
    :param data: source data, shape (num_samples, num_features)
    :param batch_size: batch size
    :param seq_len: sequence length (number of time steps)
    :return: generator yielding arrays of shape (batch_size, seq_len, num_features)
    """
    num_features = data.shape[1]
    num_chars = batch_size * seq_len  # number of characters in one batch
    num_batches = int(np.floor(data.shape[0] / num_chars))  # how many full batches fit into the data
    need_chars = num_batches * num_chars  # total number of characters needed

    targets = np.vstack((data[1:].A, data[0].A))  # shift by one character, wrapping the first row to the end; .A converts the sparse matrix to an ndarray
    inputs = data[:need_chars].A.astype(int)  # keep only the need_chars characters we can actually use
    targets = targets[:need_chars]

    targets = targets.reshape(batch_size, -1, num_features)
    inputs = inputs.reshape(batch_size, -1, num_features)

    for i in range(0, inputs.shape[1], seq_len):
        x = inputs[:, i: i + seq_len]
        y = targets[:, i: i + seq_len]
        yield x, y  # a generator keeps memory usage low


def train(model, data, batch_size, seq_len, epochs, lr=0.01, valid=None):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    if valid is not None:
        data = model.onehot_encode(data.reshape(-1, 1))
        valid = model.onehot_encode(valid.reshape(-1, 1))
    else:
        data = model.onehot_encode(data.reshape(-1, 1))

    train_loss = []
    val_loss = []

    for epoch in range(epochs):
        model.train()
        hs = None  # the LSTM hidden state (h, c)
        train_ls = 0.0
        val_ls = 0.0
        for x, y in get_batches(data, batch_size, seq_len):
            optimizer.zero_grad()
            x = torch.tensor(x).float().to(device)
            out, hs = model(x, hs)
            hs = tuple([h.data for h in hs])  # detach the hidden state from the previous graph
            y = y.reshape(-1, len(model.vocab))
            y = model.onehot_decode(y)
            y = model.label_encode(y.squeeze())
            y = torch.from_numpy(y).long().to(device)
            loss = criterion(out, y.squeeze())
            loss.backward()
            optimizer.step()
            train_ls += loss.item()

        if valid is not None:
            model.eval()
            hs = None
            with torch.no_grad():
                for x, y in get_batches(valid, batch_size, seq_len):
                    x = torch.tensor(x).float().to(device)  # one validation batch of batch_size * seq_len characters
                    out, hs = model(x, hs)
                    # out has shape (batch_size * seq_len, vocab_size)
                    hs = tuple([h.data for h in hs])  # carry the hidden state forward
                    y = y.reshape(-1, len(model.vocab))  # flatten so each row is one character
                    y = model.onehot_decode(y)  # decode the one-hot targets back to characters
                    y = model.label_encode(y.squeeze())  # map characters to integer labels
                    y = torch.from_numpy(y).long().to(device)
                    loss = criterion(out, y.squeeze())  # compute the validation loss
                    val_ls += loss.item()

            val_loss.append(np.mean(val_ls))
        train_loss.append(np.mean(train_ls))
        print("train_loss:", train_ls)

    plt.plot(train_loss, label="train_loss")
    plt.plot(val_loss, label="val loss")
    plt.title("loss vs epoch")
    plt.legend()
    plt.show()

    model_name = "lstm_model.net"
    with open(model_name, "wb") as f:  # save the model once training is done
        torch.save(model.state_dict(), f)


def predict(model, char, top_k=None, hidden_size=None):
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    model.eval()  # inference mode, weights stay fixed
    with torch.no_grad():
        char = np.array([char])  # a single input character; we predict the one that follows it
        char = char.reshape(-1, 1)  # make it 2-D to match what the encoder expects
        char_encoding = model.onehot_encode(char).A  # one-hot encode and convert the sparse result to an ndarray
        char_encoding = char_encoding.reshape(1, 1, -1)  # shape (1, 1, vocab_size); 3-D to match the model input
        char_tensor = torch.tensor(char_encoding, dtype=torch.float32)  # convert to a tensor
        char_tensor = char_tensor.to(device)

        out, hidden_size = model(char_tensor, hidden_size)  # run the model; out holds the logits

        probs = F.softmax(out, dim=1).squeeze()  # probabilities over every character in the vocabulary

        if top_k is None:
            indices = np.arange(len(model.vocab))  # consider the whole vocabulary
        else:
            probs, indices = probs.topk(top_k)  # keep only the top_k most likely characters
            indices = indices.cpu().numpy()
        probs = probs.cpu().numpy()

        char_index = np.random.choice(indices, p=probs/probs.sum())  # sample one character index as the prediction
        char = model.int_char[char_index]  # look up the predicted character

    return char, hidden_size


def sample(model, length, top_k=None, sentence="c"):
    hidden_size = None
    new_sentence = [char for char in sentence]
    for i in range(length):
        next_char, hidden_size = predict(model, new_sentence[-1], top_k=top_k, hidden_size=hidden_size)
        new_sentence.append(next_char)
    return "".join(new_sentence)


def main():
    hidden_size = 512
    num_layers = 2
    batch_size = 128
    seq_len = 100
    epochs = 2
    lr = 0.01

    f = pd.read_csv("../datasets/dev.tsv", sep="\t", header=None)
    f = f[0]
    text = list(f)
    text = ".".join(text)

    vocab = np.array(sorted(set(text)))  # build the character vocabulary
    vocab_size = len(vocab)

    val_len = int(np.floor(0.2 * len(text)))  # 80/20 train/validation split
    trainset = np.array(list(text[:-val_len]))
    validset = np.array(list(text[-val_len:]))

    model = lstm_model(vocab, hidden_size, num_layers)  # instantiate the model
    train(model, trainset, batch_size, seq_len, epochs, lr=lr, valid=validset)  # train the model
    model.load_state_dict(torch.load("lstm_model.net"))  # load the saved weights
    new_text = sample(model, 100, top_k=5)  # generate 100 characters, sampling from the 5 most likely at each step
    print(new_text)  # print the generated text


if __name__ == "__main__":
    main()
Summary

In this example the preprocessing pairs one sequence with another sequence rather than with a single character: for the input abcd the targets are bcda, that is, each character is labelled with the character that follows it. The later reshaping then effectively reduces this to a one-character-to-one-character mapping, which is why prediction can only go from a single character to the next one. That loses much of what makes a recurrent network worthwhile; the relationship between characters is severed and the task degenerates into a plain classification problem.
Because a recurrent network exploits positional information in a sequence, you normally set a sliding-window size (a number of time steps) whose purpose is to preserve that sequential structure. For example, with the training text abcdef and a window of 3, the usual approach splits it into abc-d, bcd-e and cde-f, that is, three consecutive characters paired with the character that follows them. The trained model is then fed three characters to generate one; the predicted character plus the two characters before it form the next input. If the initial seed is abc and the model predicts d, the next input is bcd, and so on, the window sliding forward one step at a time with its size fixed at 3, as the small sketch below shows.
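A tiny worked example of that windowing scheme, independent of the dataset (the string and window size are only for illustration):

# Build (window, next_char) pairs from "abcdef" with a window of 3.
text = "abcdef"
window = 3
pairs = [(text[i:i + window], text[i + window]) for i in range(len(text) - window)]
print(pairs)   # [('abc', 'd'), ('bcd', 'e'), ('cde', 'f')]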
In this example, although seq_len=100 suggests a sliding window of 100, the training data is not actually built that way, and prediction still goes one character at a time, so the sliding-window idea never really comes into play. The code therefore leaves plenty of room for you to improve and optimize yourself.
Below are changes to a few parts of the code so that training and prediction follow the sliding-window approach.
Data preprocessing function
def get_batches(data, batch_size, seq_len):
    """
    :param data: source data, shape (num_samples, num_features)
    :param batch_size: batch size
    :param seq_len: sequence length (number of time steps)
    :return: generator yielding (batch of windows, batch of labels)
    """
    num_features = data.shape[1]
    num_chars = batch_size * seq_len  # number of characters in one batch
    num_batches = int(np.floor(data.shape[0] / num_chars))  # how many full batches fit into the data
    need_chars = num_batches * num_chars  # total number of characters needed

    targets = np.vstack((data[1:].A, data[0].A))  # shifted-by-one version of the data; .A converts the sparse matrix to an ndarray
    inputs = data[:need_chars].A.astype(int)  # keep only the characters we can actually use
    targets = targets[:need_chars]

    train_data = np.zeros((inputs.shape[0] - seq_len, seq_len, num_features))
    train_label = np.zeros((inputs.shape[0] - seq_len, num_features))

    for i in range(0, inputs.shape[0] - seq_len, 1):
        # inputs holds one one-hot vector per character
        # idea: slide a window over the text, e.g. for "abcd" with a window of 2: ab -> c, bc -> d
        train_data[i] = inputs[i:i + seq_len]  # a window of seq_len (= 100) characters
        train_label[i] = inputs[i + seq_len]   # the label is the character immediately after the window
    print(train_data.shape)
    print(train_label.shape)

    for i in range(0, inputs.shape[0] - seq_len, batch_size):
        x = train_data[i:i + batch_size]   # train on batch_size (= 128) windows at a time
        y = train_label[i:i + batch_size]  # the corresponding 128 labels
        print(x.shape)
        print(y.shape)
        print("-----------")
        yield x, y
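With this generator each x has shape (batch_size, seq_len, num_features) and each y has shape (batch_size, num_features), i.e. one one-hot label per window. The training loop would have to follow suit; a hedged sketch of the adjusted inner step, with all names carried over from train() above rather than taken from the original post:

# Sketch of the adjusted training step for windowed batches (inside the epoch loop of train()).
for x, y in get_batches(data, batch_size, seq_len):
    optimizer.zero_grad()
    x = torch.tensor(x).float().to(device)                    # (batch_size, seq_len, vocab_size)
    out, _ = model(x, None)                                   # out: (batch_size, vocab_size); each window carries its own context
    y = model.label_encode(model.onehot_decode(y).squeeze())  # one integer label per window
    loss = criterion(out, torch.from_numpy(y).long().to(device))
    loss.backward()
    optimizer.step()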
Model construction

class lstm_model(nn.Module):
    def __init__(self, vocab, hidden_size, num_layers, dropout=0.5, seq_len=100):
        super(lstm_model, self).__init__()
        self.seq_len = seq_len
        self.vocab = vocab  # the character vocabulary
        # index <-> character mappings
        self.int_char = {i: char for i, char in enumerate(vocab)}
        self.char_int = {char: i for i, char in self.int_char.items()}
        # one-hot encoder for the characters
        self.encoder = OneHotEncoder(sparse=True).fit(vocab.reshape(-1, 1))
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM layer
        self.lstm = nn.LSTM(len(vocab), hidden_size, num_layers, batch_first=True, dropout=dropout)
        # fully connected output layer
        self.linear = nn.Linear(hidden_size, len(vocab))

    def forward(self, sequence, hs=None):
        # print()
        # print("forward:", sequence.shape)
        out, hs = self.lstm(sequence, hs)  # LSTM output: (batch_size, sequence_length, hidden_size)
        print("----", out.shape)
        # out = out.reshape(-1, self.hidden_size)  # no longer flattened: only the last time step is used
        print(out[:, -1].shape)
        output = self.linear(out[:, -1])  # keep only the last time step, shape (batch_size, hidden_size): one label per window
        print("output-----:", output.shape)
        return output, hs

    def onehot_encode(self, data):
        return self.encoder.transform(data)

    def onehot_decode(self, data):
        return self.encoder.inverse_transform(data)

    def label_encode(self, data):
        return np.array([self.char_int[ch] for ch in data])

    def label_decode(self, data):
        return np.array([self.int_char[ch] for ch in data])
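The post only shows the data and model changes; prediction would also have to feed the most recent window of characters instead of a single one. A hedged sketch of what a window-based prediction step might look like, reusing the names defined above (predict_window itself is not part of the original code):

def predict_window(model, seed_chars, top_k=5, hs=None):
    # Sketch only: feed the most recent seq_len characters as one window
    # and sample the next character from the model's output distribution.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device).eval()
    with torch.no_grad():
        window = np.array(list(seed_chars)[-model.seq_len:]).reshape(-1, 1)
        x = model.onehot_encode(window).A.reshape(1, len(window), -1)   # (1, window_len, vocab_size)
        out, hs = model(torch.tensor(x, dtype=torch.float32).to(device), hs)
        probs, indices = F.softmax(out, dim=1).squeeze().topk(top_k)    # keep the top_k candidates
        probs, indices = probs.cpu().numpy(), indices.cpu().numpy()
        next_index = np.random.choice(indices, p=probs / probs.sum())   # sample one of them
    return model.int_char[next_index], hs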