Deep Learning Check-in, Week N6: Chinese Text Classification in PyTorch

1. Preparation

Data format: a tab-separated file of text/label pairs with no header row. First, the imports and device setup:
import torch
from torch import nn
import torchvision
from torchvision import transforms,datasets
import os,PIL,pathlib,warnings

warnings.filterwarnings("ignore")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

import pandas as pd

# The CSV has no header row (header=None) and is tab-separated (sep='\t')
train_data = pd.read_csv('./data/train.csv',sep='\t',header=None)
train_data.head()
# Generator that yields one (text, label) pair at a time
def custom_data_iter(texts,labels):
    for x,y in zip(texts,labels):
        yield x,y

train_iter = custom_data_iter(train_data[0].values[:],train_data[1].values[:])
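As a quick sanity check (a throwaway snippet, not part of the pipeline), pulling one item shows that the generator yields (text, label) pairs. Note that a generator is consumed as it is iterated, which is why it is rebuilt before training in section 4:

x, y = next(custom_data_iter(train_data[0].values, train_data[1].values))
print(x, y)  # one raw sentence and its label string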

2. Data Preprocessing

from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import jieba

# Use jieba word segmentation as the Chinese tokenizer (lcut returns a list of tokens)
tokenizer = jieba.lcut

def yield_tokens(data_iter):
    for text,_ in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_iter),specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"])
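As a quick illustration of the lookup (the ids are corpus-dependent, so the exact numbers are not meaningful): any token missing from the training vocabulary falls back to the <unk> index set above.

print(vocab(tokenizer("我想听TWICE的新曲")))  # a list of integer ids, one per jieba token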

label_name = sorted(set(train_data[1].values[:]))  # sorted so the label-to-index mapping is deterministic across runs

text_pipeline = lambda x:vocab(tokenizer(x))
label_pipeline = lambda x:label_name.index(x)
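The training code in section 4 builds DataLoaders with a collate_fn named collate_batch that the post never defines. Below is a minimal sketch in the standard torchtext/EmbeddingBag style, written to match the (text, label, offsets) unpacking used in train(); treat it as an assumed reconstruction, not the author's exact code:

from torch.utils.data import DataLoader

def collate_batch(batch):
    # Concatenate every token id in the batch into one 1-D tensor and
    # record where each sequence starts, as nn.EmbeddingBag expects.
    label_list, text_list, offsets = [], [], [0]
    for (_text, _label) in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list = torch.cat(text_list)
    # Cumulative sum of the lengths (dropping the last) gives the start offsets
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    return text_list.to(device), label_list.to(device), offsets.to(device)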

3. Building the Model

from torch import nn

class TextClassificationModel(nn.Module):
    def __init__(self,vocab_size,embed_dim,num_class):
        super(TextClassificationModel,self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size,embed_dim)  # mean-pools the embedded tokens of each sequence
        self.fc = nn.Linear(embed_dim,num_class)
        self.init_weights()
    
    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange,initrange)
        self.fc.weight.data.uniform_(-initrange,initrange)
        self.fc.bias.data.zero_()

    def forward(self,text,offsets):
        embedded = self.embedding(text,offsets)
        return self.fc(embedded)

num_class = len(label_name)
vocab_size = len(vocab)
em_size = 64
model = TextClassificationModel(vocab_size,em_size,num_class).to(device)
model
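For intuition about the offsets argument: nn.EmbeddingBag embeds a flat tensor of token ids and mean-pools (its default mode) each sequence in a single call, with offsets marking where each sequence begins. A tiny standalone illustration with made-up numbers, unrelated to the dataset:

import torch
from torch import nn

bag = nn.EmbeddingBag(10, 3)                     # 10 ids in the vocab, 3-dim embeddings, mode='mean' by default
tokens = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])  # two sequences concatenated: [1,2,4,5] and [4,3,2,9]
offsets = torch.tensor([0, 4])                   # start index of each sequence within tokens
print(bag(tokens, offsets).shape)                # torch.Size([2, 3]): one pooled vector per sequence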
Next, define the training and evaluation loops:
import time

def train(dataloader):
    model.train()
    total_acc,train_loss,total_count = 0,0,0
    log_interval = 50
    start_time = time.time()

    for idx,(text,label,offsets) in enumerate(dataloader):
        predicted_label = model(text,offsets)

        optimizer.zero_grad()
        loss = criterion(predicted_label,label)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(),0.1) # gradient clipping
        optimizer.step()

        total_acc += (predicted_label.argmax(1)==label).sum().item()
        train_loss += loss.item()*label.size(0)
        total_count += label.size(0)

        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                              total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()

def evaluate(dataloader):
    model.eval()
    total_acc, test_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx,(text,label,offsets) in enumerate(dataloader):
            predicted_label = model(text,offsets)

            loss = criterion(predicted_label,label)

            total_acc += (predicted_label.argmax(1)==label).sum().item()
            test_loss += loss.item()*label.size(0)
            total_count += label.size(0)
    return total_acc/total_count,test_loss/total_count

4. Training the Model

from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset

# Hyperparameters
EPOCHS = 10
LR = 5
BATCH_SIZE = 64

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(),lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.1)  # each step multiplies lr by 0.1
total_accu = None


# Rebuild the generator: it was exhausted when the vocabulary was built in section 2
train_iter = custom_data_iter(train_data[0].values[:],train_data[1].values[:])
train_dataset = to_map_style_dataset(train_iter)

num_train = int(len(train_dataset)*0.8)
split_train,split_valid = random_split(train_dataset,[num_train,len(train_dataset)-num_train])

train_dataloader = DataLoader(split_train,batch_size=BATCH_SIZE,shuffle=True,collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid,batch_size=BATCH_SIZE,shuffle=True,collate_fn=collate_batch)

for epoch in range(1,EPOCHS+1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc,val_loss = evaluate(valid_dataloader)

    lr = optimizer.state_dict()['param_groups'][0]['lr']
    
    if total_accu is not None and total_accu > val_acc:
        scheduler.step()
    else:
        total_accu = val_acc
    print('-' * 69)
    print('| epoch {:1d} | time: {:4.2f}s | '
         'valid_acc {:4.3f} valid_loss {:4.3f} | lr {:4.6f}'.format(epoch,time.time()-epoch_start_time,val_acc,val_loss,lr))
    print('-' * 69)

def predict(text):
    model.eval()  # good practice, although this model has no dropout/batchnorm
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text)).to(device)
        # A single sequence, so the only offset is 0
        output = model(text,torch.tensor([0]).to(device))
        return output.argmax(1).item()
# ex_text_str = "还有南昌到哈尔滨西的火车票吗?"  # alternate example: a train-ticket query
ex_text_str = "我想听TWICE的新曲"  # example input: a music request
print("Predicted class: %s" % label_name[predict(ex_text_str)])

Summary

This week applied the text-classification pipeline to Chinese data. The code is essentially the same as in week N1; the substantive change is swapping in jieba word segmentation as the tokenizer and building the vocabulary from those tokens.
