Getting Started with NLP - Week N2: Chinese Text Classification in PyTorch

  • This article is a learning-record post from the 365-day deep learning training camp
  • Original author: K同学啊 | tutoring and custom projects available

Table of Contents

  • I. Background and Development Environment
  • II. Data Preprocessing
    • 1. Loading the Data
    • 2. Building the Vocabulary
    • 3. Generating Data Batches and Iterators
  • III. Model Construction
    • 1. Building the Model
    • 2. Initializing the Model
    • 3. Defining the Training and Evaluation Functions
  • IV. Training the Model
    • 1. Splitting the Dataset and Running the Model
    • 2. Testing on Specified Data

I. Background and Development Environment

Week N2: Chinese Text Classification in PyTorch

  • Python 3.8.12
  • pytorch==1.8.1+cu111
  • torchtext==0.9.1
  • portalocker==2.7.0
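
The code below also uses jieba and pandas, which are not listed above. A possible install command for the extra dependencies (my suggestion, assuming pip):

pip install torchtext==0.9.1 portalocker==2.7.0 jieba pandas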

This week's tasks:

  • Predict the text label (column 2) from the text content (column 1)
  • Try to implement this independently based on the week N1 material, referring to the code in this post as little as possible

Differences from last week:

  • The data is loaded from a local file
  • The text is Chinese instead of English
  • The text labels require additional preprocessing

This week we implement Chinese text classification in PyTorch. The code is largely the same as in week N1; the main difference is that this task uses a local Chinese dataset, a sample of which is shown below:
[Figure 1: sample rows of the local Chinese training data]
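
Based on the loading code below (tab-separated, no header, text in column 0 and label in column 1), each row of train.csv looks roughly like this (hypothetical rows for illustration; the pairings follow the example sentences and labels used later in this post):

还有双鸭山到淮阴的汽车票吗13号的	Travel-Query
我想看和平精英上战神必备技巧的游戏视频	Video-Play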

II. Data Preprocessing

1. Loading the Data

''' Load the custom Chinese dataset '''
import torch
import torch.nn as nn
import pandas as pd

# Use the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Tab-separated file with no header: column 0 is the text, column 1 is the label
train_data = pd.read_csv('./data/train.csv', sep='\t', header=None)
train_data.head()  # inspect the first rows (displays in a notebook)
# Build a dataset iterator that yields (text, label) pairs
def coustom_data_iter(texts, labels):
    for x, y in zip(texts, labels):
        yield x, y
train_iter = coustom_data_iter(train_data[0].values[:], train_data[1].values[:])

2. Building the Vocabulary

''' Build the vocabulary '''
import jieba
from collections import Counter
from torchtext.vocab import Vocab

# Use jieba for Chinese word segmentation
tokenizer = jieba.lcut
counter = Counter()
for (line, label) in train_iter:
    counter.update(tokenizer(line))
vocab = Vocab(counter, min_freq=1)  # legacy torchtext 0.9.x Vocab built from a Counter
print([vocab[token] for token in tokenizer("我想看和平精英上战神必备技巧的游戏视频")])

''' Prepare the text and label processing pipelines '''
label_name = list(set(train_data[1].values[:]))  # unique labels; note: set order is not deterministic across runs
print(label_name)
text_pipeline  = lambda x: [vocab[token] for token in tokenizer(x)]
label_pipeline = lambda x: label_name.index(x)
print(text_pipeline('我想看和平精英上战神必备技巧的游戏视频'))
print(label_pipeline('Video-Play'))
Building prefix dict from the default dictionary ...
Loading model from cache C:\Users\OAIXNA~1\AppData\Local\Temp\jieba.cache
Loading model cost 0.625 seconds.
Prefix dict has been built successfully.
[3, 11, 14, 974, 1080, 147, 7725, 7575, 7794, 2, 187, 29]

['Weather-Query', 'Other', 'TVProgram-Play', 'Alarm-Update', 'Audio-Play', 'Radio-Listen', 'Calendar-Query', 'HomeAppliance-Control', 'Video-Play', 'FilmTele-Play', 'Travel-Query', 'Music-Play']
[3, 11, 14, 974, 1080, 147, 7725, 7575, 7794, 2, 187, 29]
8
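
Because label_name is built from a Python set, its iteration order can change between runs (string hashing is randomized by default), so the integer assigned to each label is not stable. A minimal sketch of a deterministic alternative (my suggestion, not part of the original code):

# Sort the unique labels so every run maps each label to the same index
label_name = sorted(set(train_data[1].values[:]))
label_pipeline = lambda x: label_name.index(x)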

3. Generating Data Batches and Iterators

''' Generate data batches and iterators '''
def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_text, _label) in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)  # variable length, e.g. torch.Size([41]), torch.Size([58])...
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    
    label_list = torch.tensor(label_list, dtype=torch.int64)  # torch.Size([64])
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)  # start index of each sequence, torch.Size([64])
    text_list = torch.cat(text_list)  # concatenate the list of tensors into one flat tensor
    # Returned in the order (label, text, offsets); the training loop unpacks them in this order
    return label_list.to(device), text_list.to(device), offsets.to(device)
# Data loader
#dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
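
To see what collate_batch produces, here is a small worked example (hypothetical token ids, my own snippet): for two sequences of lengths 3 and 2, the flattened text tensor holds 5 ids and offsets marks where each sequence starts.

# Hypothetical mini-batch already converted to ids:
# seq A -> [3, 11, 14] (length 3), seq B -> [7, 29] (length 2)
lengths = [3, 2]
offsets = torch.tensor([0] + lengths[:-1]).cumsum(dim=0)  # tensor([0, 3])
text    = torch.tensor([3, 11, 14, 7, 29])                # flat concatenation
# nn.EmbeddingBag will average the embeddings of text[0:3] as bag 0 and text[3:5] as bag 1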

III. Model Construction

1. Building the Model

''' Build the text classification model '''
class TextClassificationModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextClassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)  # fill the tensor with values sampled from a uniform distribution
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()  # zero the bias

    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)  # torch.Size([batch_size, embed_dim]), e.g. [64, 64]
        output = self.fc(embedded)  # torch.Size([batch_size, num_class]), e.g. [64, 12]
        return output
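
A quick sanity check of the forward pass with dummy inputs (my own snippet, reusing the toy batch from the collate example; vocab_size and embed_dim here are arbitrary):

# Instantiate a small model and run the toy batch through it
m = TextClassificationModel(vocab_size=100, embed_dim=8, num_class=12)
out = m(torch.tensor([3, 11, 14, 7, 29]), torch.tensor([0, 3]))
print(out.shape)  # torch.Size([2, 12]) -- one row of class scores per bag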

2. Initializing the Model

''' Initialize a model instance '''
num_class  = len(label_name)
vocab_size = len(vocab)  # vocabulary size
emsize     = 64          # embedding dimension
model      = TextClassificationModel(vocab_size, emsize, num_class).to(device)

3. Defining the Training and Evaluation Functions

''' Training function '''
import time

def train(dataloader):
    model.train()  # training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 500
    start_time = time.time()
    
    # collate_batch returns batches in the order (label, text, offsets)
    for idx, (label, text, offsets) in enumerate(dataloader):
        optimizer.zero_grad()  # reset gradients to zero
        predicted_label = model(text, offsets)
        loss = criterion(predicted_label, label)
        loss.backward()  # backpropagation
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)  # gradient clipping
        optimizer.step()  # update the parameters
        # Track accuracy and loss
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        train_loss  += loss.item()
        total_count += label.size(0)
        if idx % log_interval == 0 and idx > 0:
            print('| epoch {:3d} | {:5d}/{:5d} batches, train_acc {:8.3f} train_loss {:8.3f}'.format(
                epoch, idx, len(dataloader), total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()


''' Evaluation function '''
def evaluate(dataloader):
    model.eval()  # switch to evaluation mode
    total_acc, val_loss, total_count = 0, 0, 0
    
    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            loss = criterion(predicted_label, label)  # compute the loss
            # Track evaluation statistics
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            val_loss    += loss.item()
            total_count += label.size(0)
    return total_acc/total_count, val_loss/total_count


''' Prediction function '''
def predict(text, text_pipeline):
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text)).to(device)
        # offsets=[0]: treat the whole id tensor as a single bag (one sequence)
        output = model(text, torch.tensor([0]).to(device))
        return output.argmax(1).item()

IV. Training the Model

1. Splitting the Dataset and Running the Model

''' Start training '''
import os
from torch.utils.data import DataLoader, random_split

if __name__ == '__main__':
    # Hyperparameters
    EPOCHS = 10  # number of epochs
    LR = 5  # learning rate
    BATCH_SIZE = 64  # batch size for training
   
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=LR)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
    total_accu = None
    # Build the dataset
    train_iter = coustom_data_iter(train_data[0].values[:], train_data[1].values[:])
    train_dataset = list(train_iter)
    # Split the dataset: 80% training, 20% validation
    num_train = int(len(train_dataset) * 0.8)
    split_train_, split_valid_ = random_split(train_dataset, [num_train, len(train_dataset) - num_train])
    # Load the datasets; shuffle=True randomly shuffles the samples
    train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
    valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
    
    for epoch in range(1, EPOCHS + 1):
        epoch_start_time = time.time()
        train(train_dataloader)
        accu_val, loss_val = evaluate(valid_dataloader)
        # Get the current learning rate
        lr = optimizer.state_dict()['param_groups'][0]['lr']
        # Decay the learning rate only when validation accuracy stops improving
        if total_accu is not None and total_accu > accu_val:
            scheduler.step()
        else:
            total_accu = accu_val
        print('-' * 59)
        print('| end of epoch {:3d} | time: {:5.2f}s | '
              'valid_acc {:8.3f} valid_loss {:8.3f} | lr {:8.6f}'.format(epoch, time.time()-epoch_start_time, accu_val, loss_val, lr))
        print('-' * 59)
    
    os.makedirs('output', exist_ok=True)  # make sure the output directory exists before saving
    torch.save(model.state_dict(), 'output\\model_TextClassification.pth')
   
    print('Checking the results of test dataset.')
    # No separate test set here, so the validation set is reused for the final check
    accu_test, loss_test = evaluate(valid_dataloader)
    print('test accuracy {:8.3f}, test loss {:8.3f}'.format(accu_test, loss_test))
-----------------------------------------------------------
| end of epoch   1 | time:  2.23s | valid_acc    0.793 valid_loss    0.012 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   2 | time:  1.75s | valid_acc    0.834 valid_loss    0.009 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   3 | time:  1.95s | valid_acc    0.863 valid_loss    0.007 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   4 | time:  2.45s | valid_acc    0.871 valid_loss    0.006 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   5 | time:  2.47s | valid_acc    0.883 valid_loss    0.006 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   6 | time:  2.44s | valid_acc    0.890 valid_loss    0.006 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   7 | time:  2.39s | valid_acc    0.893 valid_loss    0.006 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   8 | time:  2.40s | valid_acc    0.897 valid_loss    0.005 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch   9 | time:  2.47s | valid_acc    0.895 valid_loss    0.006 | lr 5.000000
-----------------------------------------------------------
-----------------------------------------------------------
| end of epoch  10 | time:  2.36s | valid_acc    0.901 valid_loss    0.005 | lr 0.500000
-----------------------------------------------------------
Checking the results of test dataset.
test accuracy    0.901, test loss    0.005
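
Note the learning rate printed for the final epoch: validation accuracy dipped at epoch 9 (0.897 → 0.895), which triggered scheduler.step(), so the StepLR scheduler multiplied the learning rate by gamma=0.1, dropping it from 5.0 to 0.5 for epoch 10.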

2. Testing on Specified Data

''' Prediction on a specified example '''
if __name__=='__main__':
    model.load_state_dict(torch.load('output\\model_TextClassification.pth'))
    
    ex_text_str = "随便播放一首专辑阁楼里的佛里的歌"
    #ex_text_str = "还有双鸭山到淮阴的汽车票吗13号的"
    model = model.to(device)
    
    print("The category of this text is: %s" % label_name[predict(ex_text_str, text_pipeline)])
The category of this text is: Music-Play
