Similarity Analysis with the LDA Algorithm

The example below trains a gensim LDA model on a small corpus, represents each document as a topic distribution, and then scores a new document against every original document using cosine similarity over those distributions.

import nltk
from gensim import corpora
from gensim.models import LdaModel
from gensim.matutils import cossim
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# If you have not yet downloaded nltk's tokenizer and stopword data,
# uncomment the lines below and run them once
# nltk.download('punkt')
# nltk.download('stopwords')

# Preprocessing: lowercase, tokenize, keep alphabetic tokens, drop stopwords
def preprocess(text):
    stop_words = set(stopwords.words('english'))
    tokens = word_tokenize(text.lower())
    tokens = [word for word in tokens if word.isalpha()]  # keep alphabetic tokens only
    tokens = [word for word in tokens if word not in stop_words]  # remove stopwords
    return tokens

# Sample documents
documents = [
    "Text processing using LDA is interesting.",
    "Another document example for LDA.",
    "Text mining and natural language processing.",
    "LDA helps in topic modeling and finding patterns.",
    "This document is for testing LDA similarity."
]

# Preprocess the corpus
texts = [preprocess(doc) for doc in documents]

# Build the dictionary
dictionary = corpora.Dictionary(texts)

# Convert to bag-of-words representation
corpus = [dictionary.doc2bow(text) for text in texts]

# Train the LDA model
num_topics = 2
lda_model = LdaModel(corpus, num_topics=num_topics, id2word=dictionary, passes=15)

# Extract the topic distribution of a new document
new_doc = "New text for testing similarity with LDA."
new_doc_preprocessed = preprocess(new_doc)
new_doc_bow = dictionary.doc2bow(new_doc_preprocessed)
new_doc_topics = lda_model.get_document_topics(new_doc_bow)

# Topic distributions of the original documents
doc_topics = [lda_model.get_document_topics(doc_bow) for doc_bow in corpus]

# Cosine similarity between the new document and each original document
similarities = []
for i, doc_topic in enumerate(doc_topics):
    similarity = cossim(new_doc_topics, doc_topic)
    similarities.append((i, similarity))

# Print the similarity results
print("Similarity between new document and each original document:")
for i, similarity in similarities:
    print(f"Document {i}: Similarity = {similarity}")

