Deep Learning Check-in, Week N6: Chinese Text Classification with PyTorch

1. Preparation

Data format: train.csv has no header row; each line holds a sentence and its class label separated by a tab (text\tlabel).

import torch
from torch import nn
import warnings

warnings.filterwarnings("ignore")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

import pandas as pd

# The CSV has no header (header=None) and uses tab as the separator (sep='\t')
train_data = pd.read_csv('./data/train.csv', sep='\t', header=None)
train_data.head()
# Build a generator that yields (text, label) pairs
def custom_data_iter(texts, labels):
    for x, y in zip(texts, labels):
        yield x, y

train_iter = custom_data_iter(train_data[0].values, train_data[1].values)
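
As a quick sanity check (an illustrative step, not part of the original script), you can pull one sample from the generator. Note that this consumes an item, so build a fresh generator before the vocabulary step below:

# Peek at the first (text, label) pair from a throwaway generator
sample_text, sample_label = next(custom_data_iter(train_data[0].values, train_data[1].values))
print(sample_text, sample_label)  # a raw sentence string and its label string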

2. Data Preprocessing

from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import jieba

# Use jieba word segmentation as the Chinese tokenizer (lcut returns a list of tokens)
tokenizer = jieba.lcut

def yield_tokens(data_iter):
    for text,_ in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"])  # map out-of-vocabulary tokens to <unk>

# Sort the label set so that label indices are reproducible across runs
label_name = sorted(set(train_data[1].values))

text_pipeline  = lambda x: vocab(tokenizer(x))   # text  -> list of token ids
label_pipeline = lambda x: label_name.index(x)   # label -> integer class index
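
The DataLoader calls in Part 4 pass collate_fn=collate_batch, but that function never appears in the post. The sketch below is an assumed implementation following the standard torchtext EmbeddingBag recipe, written to match the (text, label, offsets) unpacking used in train() and evaluate():

def collate_batch(batch):
    # Flatten all sequences in the batch into one long id tensor and record
    # where each sequence starts -- the input format nn.EmbeddingBag expects
    label_list, text_list, offsets = [], [], [0]
    for (_text, _label) in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list = torch.cat(text_list)
    # The cumulative sum of sequence lengths gives each sequence's start offset
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    return text_list.to(device), label_list.to(device), offsets.to(device)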

3. Building the Model

from torch import nn

class TextClassificationModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextClassificationModel, self).__init__()
        # EmbeddingBag looks up the embeddings of a flat index tensor and
        # mean-pools them per sequence, using offsets to mark sequence starts
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)  # (batch, embed_dim)
        return self.fc(embedded)                  # (batch, num_class)

num_class = len(label_name)
vocab_size = len(vocab)
em_size = 64
model = TextClassificationModel(vocab_size,em_size,num_class).to(device)
model
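
To make the text/offsets interface concrete, here is a small illustrative example (the token ids are made up) showing how two variable-length sequences are packed into one flat tensor for EmbeddingBag:

# Two sequences of token ids: [3, 7, 1] and [5, 2]
flat_text = torch.tensor([3, 7, 1, 5, 2], dtype=torch.int64).to(device)
offsets   = torch.tensor([0, 3]).to(device)  # offsets[i] = start of sequence i in flat_text
logits = model(flat_text, offsets)
print(logits.shape)  # torch.Size([2, num_class]): one row of class scores per sequence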
import time

def train(dataloader):
    model.train()
    total_acc,train_loss,total_count = 0,0,0
    log_interval = 50
    start_time = time.time()

    for idx,(text,label,offsets) in enumerate(dataloader):
        predicted_label = model(text,offsets)

        optimizer.zero_grad()
        loss = criterion(predicted_label,label)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)  # gradient clipping
        optimizer.step()

        total_acc += (predicted_label.argmax(1)==label).sum().item()
        train_loss += loss.item()*label.size(0)
        total_count += label.size(0)

        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                              total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()

def evaluate(dataloader):
    model.eval()
    total_acc, test_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx,(text,label,offsets) in enumerate(dataloader):
            predicted_label = model(text,offsets)

            loss = criterion(predicted_label,label)

            total_acc += (predicted_label.argmax(1)==label).sum().item()
            test_loss += loss.item()*label.size(0)
            total_count += label.size(0)
    return total_acc/total_count,test_loss/total_count

4. Training the Model

from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset

# Hyperparameters
EPOCHS = 10
LR = 5          # large learning rates are common with SGD + EmbeddingBag
BATCH_SIZE = 64

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
# Decay the learning rate by 10x whenever the scheduler steps
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
total_accu = None


# The earlier generator was consumed while building the vocabulary,
# so re-create it before materializing the map-style dataset
train_iter = custom_data_iter(train_data[0].values, train_data[1].values)
train_dataset = to_map_style_dataset(train_iter)

num_train = int(len(train_dataset)*0.8)
split_train,split_valid = random_split(train_dataset,[num_train,len(train_dataset)-num_train])

train_dataloader = DataLoader(split_train,batch_size=BATCH_SIZE,shuffle=True,collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid,batch_size=BATCH_SIZE,shuffle=True,collate_fn=collate_batch)

for epoch in range(1,EPOCHS+1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc,val_loss = evaluate(valid_dataloader)

    lr = optimizer.state_dict()['param_groups'][0]['lr']
    
    # Step the LR scheduler only when validation accuracy stops improving
    if total_accu is not None and total_accu > val_acc:
        scheduler.step()
    else:
        total_accu = val_acc
    print('-' * 69)
    print('| epoch {:1d} | time: {:4.2f}s | '
         'valid_acc {:4.3f} valid_loss {:4.3f} | lr {:4.6f}'.format(epoch,time.time()-epoch_start_time,val_acc,val_loss,lr))
    print('-' * 69)

def predict(text):
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text)).to(device)
        # A single sequence starting at offset 0
        output = model(text, torch.tensor([0]).to(device))
        return output.argmax(1).item()

# ex_text_str = "还有南昌到哈尔滨西的火车票吗?"
ex_text_str = "我想听TWICE的新曲"
print("Predicted category: %s" % label_name[predict(ex_text_str)])

Summary

In this session we implemented classification for Chinese text. The main code is essentially the same as in Week N1; the key change is using jieba word segmentation as the tokenizer so that Chinese sentences can be split into tokens before building the vocabulary.
