Natural Language Processing (NLP): Algorithm Principles and Implementations
Traditional Machine Learning Algorithms
Naïve Bayes is based on Bayes' theorem and assumes the features are conditionally independent; it classifies text by computing prior and conditional probabilities.
MaxEnt (Maximum Entropy) finds the probability distribution that maximizes entropy subject to the given constraints.
HMM (Hidden Markov Model) is used for sequence labeling, modeling sequences through state-transition and emission (observation) probabilities.
CRF (Conditional Random Field) is a discriminative graphical model that directly models the conditional probability of the label sequence and can draw on features of the entire observation sequence, which helps capture longer-range dependencies.
LDA (Latent Dirichlet Allocation) is a topic model that uncovers topic distributions through a three-level document-topic-word structure.
Below are Python implementation examples of these classic machine learning models, covering text classification and sequence labeling tasks:
Naïve Bayes (text classification)
```python
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer

# Example data
texts = ["good movie", "bad plot", "great acting"]
labels = [1, 0, 1]  # 1: positive, 0: negative

# Feature extraction (bag-of-words counts)
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(texts)

# Train the model
clf = MultinomialNB()
clf.fit(X, labels)

# Predict a new sample
new_text = ["excellent performance"]
print(clf.predict(vectorizer.transform(new_text)))  # prints [1] (these words are unseen, so the class prior decides)
```
MaxEnt (Maximum Entropy Model)
```python
from sklearn.linear_model import LogisticRegression  # MaxEnt is commonly implemented as logistic regression

# Reuse the same features and labels as above
model = LogisticRegression(max_iter=1000)
model.fit(X, labels)

# Predict class probabilities
print(model.predict_proba(vectorizer.transform(new_text)))
```
HMM (sequence labeling)
```python
from hmmlearn import hmm
import numpy as np

# Example: POS tagging (hidden states = POS tags, observations = words)
states = ["Noun", "Verb"]
observations = ["apple", "eat"]

# Map symbols to integer indices
state_map = {s: i for i, s in enumerate(states)}
obs_map = {o: i for i, o in enumerate(observations)}

# Toy training data: a single observation sequence
X = np.array([[obs_map["apple"], obs_map["eat"]]]).T  # shape (n_samples, 1)
lengths = [len(X)]

# Define the model; CategoricalHMM handles discrete symbol observations
# (older hmmlearn releases used MultinomialHMM for this case)
model = hmm.CategoricalHMM(n_components=len(states))
model.fit(X, lengths)

# Decode the most likely hidden-state sequence
print(model.predict(np.array([[obs_map["apple"]]]).T))  # prints the corresponding state index
```
CRF (Conditional Random Field)
```python
from sklearn_crfsuite import CRF

# Example: named entity recognition
train_data = [
    [("Apple", "ORG"), ("releases", "O"), ("iPhone", "PRODUCT")],
    [("Microsoft", "ORG"), ("buys", "O"), ("GitHub", "ORG")]
]

# Example feature function: features for the i-th token of a sentence
def word2features(sent, i):
    word = sent[i][0]
    return {
        'word': word,
        'word.lower()': word.lower(),
        'word[-3:]': word[-3:],
    }

# Prepare the training data
X_train = [[word2features(s, i) for i in range(len(s))] for s in train_data]
y_train = [[label for (_, label) in s] for s in train_data]

# Train the CRF
crf = CRF(algorithm='lbfgs')
crf.fit(X_train, y_train)
```
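As a quick sanity check, the trained CRF can tag the training sentences themselves (illustration only; a real evaluation would use held-out data):

```python
# Predict label sequences for the training sentences
print(crf.predict(X_train))  # a nested list of predicted label sequences
```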
LDA (topic model)
```python
from sklearn.decomposition import LatentDirichletAllocation

# Vectorize the corpus with raw term counts
# (max_df/min_df pruning only makes sense on a larger corpus; on the tiny
#  toy corpus above, min_df=2 would remove every word)
lda_vectorizer = CountVectorizer()
lda_X = lda_vectorizer.fit_transform(texts)

# Train LDA (assume 3 topics)
lda = LatentDirichletAllocation(n_components=3, random_state=0)
lda.fit(lda_X)

# Inspect the top words of each topic
feature_names = lda_vectorizer.get_feature_names_out()
for topic_idx, topic in enumerate(lda.components_):
    print(f"Topic {topic_idx}:")
    print(" ".join(feature_names[i] for i in topic.argsort()[:-5:-1]))
```
---
Deep Learning Algorithms
word2vec learns word vectors with the Skip-gram or CBOW architecture, capturing semantic information.
When CNNs are used for text classification, convolution kernels extract local features.
LSTM uses gating mechanisms to mitigate the long-range dependency problem and is well suited to sequence data.
Seq2seq uses an encoder-decoder architecture for generation tasks such as machine translation; an attention mechanism lets the model focus on the most relevant parts of the input.
Below are PyTorch code examples for Word2Vec, CNN text classification, LSTM, and Seq2seq with Attention:
Word2Vec (Skip-gram)
```python
import torch
import torch.nn as nn
import torch.optim as optim

class SkipGram(nn.Module):
    def __init__(self, vocab_size, embedding_dim):
        super().__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear = nn.Linear(embedding_dim, vocab_size)

    def forward(self, inputs):
        # inputs: indices of center words; the output scores the whole
        # vocabulary as candidate context words
        embeds = self.embeddings(inputs)
        out = self.linear(embeds)
        return out

# Example hyperparameters
vocab_size = 10000
embedding_dim = 300
model = SkipGram(vocab_size, embedding_dim)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
```
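A minimal sketch of a single skip-gram training step on one (center, context) pair; the word indices below are made up, and a real training loop would iterate over all such pairs extracted from a corpus:

```python
center = torch.tensor([42])    # index of a center word (hypothetical)
context = torch.tensor([137])  # index of one of its context words (hypothetical)

logits = model(center)           # [1, vocab_size] scores over the vocabulary
loss = loss_fn(logits, context)  # cross-entropy against the true context word
optimizer.zero_grad()
loss.backward()
optimizer.step()
```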
CNN for Text Classification
```python
class TextCNN(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_classes, filter_sizes, num_filters):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.convs = nn.ModuleList([
            nn.Conv1d(embed_dim, num_filters, k) for k in filter_sizes
        ])
        self.fc = nn.Linear(len(filter_sizes) * num_filters, num_classes)

    def forward(self, x):
        x = self.embedding(x)   # [batch, seq_len, embed_dim]
        x = x.permute(0, 2, 1)  # [batch, embed_dim, seq_len]
        x = [torch.relu(conv(x)) for conv in self.convs]
        x = [torch.max_pool1d(i, i.size(2)).squeeze(2) for i in x]  # max over time
        x = torch.cat(x, 1)     # [batch, len(filter_sizes) * num_filters]
        return self.fc(x)

# Example usage
model = TextCNN(vocab_size=10000, embed_dim=300, num_classes=5,
                filter_sizes=[3, 4, 5], num_filters=100)
```
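A quick shape check with a random batch (the batch size and sequence length are arbitrary):

```python
dummy_batch = torch.randint(0, 10000, (2, 50))  # 2 sequences of 50 token ids
logits = model(dummy_batch)
print(logits.shape)  # torch.Size([2, 5])
```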
LSTM
```python
class LSTMClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        x = self.embedding(x)
        _, (hidden, _) = self.lstm(x)      # hidden: [1, batch, hidden_dim]
        return self.fc(hidden.squeeze(0))  # classify from the final hidden state

# Example usage
model = LSTMClassifier(vocab_size=10000, embed_dim=300,
                       hidden_dim=128, num_classes=5)
```
Seq2seq with Attention
```python
class Encoder(nn.Module):
    def __init__(self, input_size, embed_size, hidden_size):
        super().__init__()
        self.embedding = nn.Embedding(input_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True)

    def forward(self, src):
        # src: [src_len, batch]
        embedded = self.embedding(src)
        outputs, (hidden, cell) = self.lstm(embedded)
        return outputs, hidden, cell  # outputs: [src_len, batch, 2 * hidden_size]

class Attention(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        # input: decoder hidden state (hidden_size) concatenated with
        # bidirectional encoder outputs (2 * hidden_size)
        self.attn = nn.Linear(hidden_size * 3, hidden_size)
        self.v = nn.Parameter(torch.rand(hidden_size))

    def forward(self, hidden, encoder_outputs):
        # hidden: [1, batch, hidden_size]; encoder_outputs: [src_len, batch, 2 * hidden_size]
        src_len = encoder_outputs.shape[0]
        hidden = hidden.repeat(src_len, 1, 1).permute(1, 0, 2)  # [batch, src_len, hidden_size]
        encoder_outputs = encoder_outputs.permute(1, 0, 2)      # [batch, src_len, 2 * hidden_size]
        energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))
        attention = torch.softmax(torch.matmul(energy, self.v), dim=1)  # [batch, src_len]
        return attention

class Decoder(nn.Module):
    def __init__(self, output_size, embed_size, hidden_size):
        super().__init__()
        self.attention = Attention(hidden_size)
        self.embedding = nn.Embedding(output_size, embed_size)
        self.lstm = nn.LSTM(embed_size + hidden_size * 2, hidden_size)
        self.fc = nn.Linear(hidden_size, output_size)
    # forward pass (embed the previous token, attend over encoder outputs,
    # feed the weighted context into the LSTM, project to the vocabulary) omitted for brevity

class Seq2Seq(nn.Module):
    def __init__(self, encoder, decoder, device):
        super().__init__()
        self.encoder = encoder.to(device)
        self.decoder = decoder.to(device)
        self.device = device
    # forward pass (encode the source, then decode token by token) omitted for brevity
```
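A minimal instantiation sketch (the vocabulary sizes and dimensions below are illustrative assumptions):

```python
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder = Encoder(input_size=8000, embed_size=256, hidden_size=512)   # source vocabulary of 8000
decoder = Decoder(output_size=8000, embed_size=256, hidden_size=512)  # target vocabulary of 8000
model = Seq2Seq(encoder, decoder, device)
```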
These implementations show the core structure of common NLP models in PyTorch. In practice, hyperparameters and data preprocessing need to be adapted to the task at hand; for production use you would also add a training loop, data loaders, and evaluation metrics.
Framework Applications
FastText supports text classification and word-vector training, and uses hierarchical softmax to speed up training.
TensorFlow provides a flexible computation-graph mechanism and supports building custom deep learning models.
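As a minimal sketch of that flexibility (separate from the FastText examples below), a custom model can be defined by subclassing tf.keras.Model; the layer sizes here are illustrative assumptions:

```python
import tensorflow as tf

class TextClassifier(tf.keras.Model):
    def __init__(self, vocab_size, embed_dim, num_classes):
        super().__init__()
        self.embedding = tf.keras.layers.Embedding(vocab_size, embed_dim)
        self.pool = tf.keras.layers.GlobalAveragePooling1D()
        self.out = tf.keras.layers.Dense(num_classes, activation="softmax")

    def call(self, x):
        # x: batch of token-id sequences
        return self.out(self.pool(self.embedding(x)))

tf_model = TextClassifier(vocab_size=10000, embed_dim=128, num_classes=2)
tf_model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
```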
PyTorch's dynamic graphs make debugging and research-oriented experimentation easier.
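Because the graph is built on the fly, the forward pass is ordinary Python, so data-dependent control flow and print-style debugging work directly; a small illustrative sketch:

```python
import torch
import torch.nn as nn

class DynamicNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(10, 10)

    def forward(self, x):
        # Ordinary Python control flow: the number of layers applied depends on the data
        for _ in range(int(x.sum().item()) % 3 + 1):
            x = torch.relu(self.fc(x))
        print(x.shape)  # tensors can be inspected mid-forward while debugging
        return x

out = DynamicNet()(torch.randn(4, 10))
```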
Below are code examples and key notes for implementing text classification and word-vector training with FastText in Python:
Install FastText
Make sure the official Python package is installed:
```bash
pip install fasttext
```
Text classification
```python
import fasttext

# Training data format: __label__<category> <text>
train_data = "train.txt"

model = fasttext.train_supervised(
    input=train_data,
    epoch=50,
    lr=0.1,
    wordNgrams=2,
    loss='hs'  # hierarchical softmax
)

# Save the model
model.save_model("model.bin")

# Predict: returns the top-3 labels and their probabilities
predictions = model.predict("some new text", k=3)
print(predictions)
```
Word vector training
```python
# Train word vectors (unsupervised mode)
model = fasttext.train_unsupervised(
    input="corpus.txt",
    model='skipgram',  # or 'cbow'
    dim=300,
    ws=5,
    minCount=5,
    loss='hs'
)

# Look up a word's vector
word_vector = model.get_word_vector("word")
print(word_vector)

# Save the vectors
model.save_model("vectors.bin")
```
Key parameters
- loss='hs': enables hierarchical softmax to speed up training
- wordNgrams: adds n-gram features; 2 is a common choice for text classification
- dim: word-vector dimensionality (typically 100-300)
- minCount: minimum word-frequency threshold
Data format requirements
- Text classification: each line has the format __label__<category> <text> (see the sketch below)
- Word-vector training: one pre-tokenized sentence per line (for English, tokens separated by spaces)
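A minimal sketch of writing a classification training file in that format (the labels and sentences are invented for illustration):

```python
# Two hypothetical training examples in FastText's expected format
sample_lines = [
    "__label__positive great acting and a moving story",
    "__label__negative the plot was slow and predictable",
]
with open("train.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(sample_lines) + "\n")
```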
Hyperparameter Search Algorithms
Grid search exhaustively evaluates a predefined parameter grid; it is guaranteed to find the best combination within that grid, but the computational cost is high.
Random search samples parameter combinations at random and is more efficient.
Bayesian optimization models the objective function with a Gaussian process and uses an acquisition function to guide parameter selection, balancing efficiency and quality.
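The snippets below assume a feature matrix X_train and label vector y_train already exist; to try them out standalone, synthetic data could be generated, for example:

```python
from sklearn.datasets import make_classification

# Synthetic data just to make the search examples runnable
X_train, y_train = make_classification(n_samples=500, n_features=20, random_state=0)
```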
Grid search implementation
Use sklearn.model_selection.GridSearchCV: define the parameter grid (param_grid) up front, and every combination is evaluated:
```python
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier

param_grid = {
    'n_estimators': [50, 100, 200],
    'max_depth': [None, 10, 20],
    'min_samples_split': [2, 5]
}

model = RandomForestClassifier()
grid_search = GridSearchCV(model, param_grid, cv=5, scoring='accuracy')
grid_search.fit(X_train, y_train)
best_params = grid_search.best_params_
```
Random search implementation
RandomizedSearchCV samples parameter combinations at random; the number of sampled combinations is set with n_iter:
```python
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint

param_dist = {
    'n_estimators': randint(50, 200),
    'max_depth': randint(3, 20),
    'min_samples_split': randint(2, 10)
}

random_search = RandomizedSearchCV(
    model, param_dist, n_iter=20, cv=5, scoring='accuracy'
)
random_search.fit(X_train, y_train)
best_params = random_search.best_params_
```
Bayesian optimization implementation
Use BayesSearchCV from scikit-optimize, or the hyperopt library. With BayesSearchCV:
```python
from skopt import BayesSearchCV
from skopt.space import Integer

search_spaces = {
    'n_estimators': Integer(50, 200),
    'max_depth': Integer(3, 20),
    'min_samples_split': Integer(2, 10)
}

bayes_search = BayesSearchCV(
    model, search_spaces, n_iter=20, cv=5, scoring='accuracy'
)
bayes_search.fit(X_train, y_train)
best_params = bayes_search.best_params_
```
Key comparison
- Computational cost: Grid Search > Bayesian > Random
- Effectiveness: Bayesian ≈ Grid > Random (especially in high-dimensional spaces)
- When to use:
  - Small parameter spaces: GridSearchCV
  - Large parameter spaces: prefer BayesSearchCV or RandomizedSearchCV
The dependencies for the code above can be installed with:
```bash
pip install scikit-learn scikit-optimize
```
Corrections are very welcome if anything here is off!