本文选择将模型下载到本地,进行离线分析计算,也可以使用在线下载,但本文略过
1 下载bert_base_chinese
下载地址:https://huggingface.co/google-bert/bert-base-chinese/tree/main
下载图上红框内的四个文件,并按照下图的目录结构放置
bert-base-chinese文件夹里放
2 代码
python
import torch
from transformers import BertTokenizer, BertModel
from torch.nn.functional import cosine_similarity
# Initialize the tokenizer and the model from local (offline) files.
# NOTE(review): the tokenizer is loaded from a bare vocab.txt outside the
# model directory, while the model loads from 'bert-base-chinese' — presumably
# both come from the same downloaded checkpoint; verify the paths are in sync.
vocab_file = 'D:/code/python/rpa/vocab.txt'
tokenizer = BertTokenizer.from_pretrained(vocab_file)
model = BertModel.from_pretrained('D:/code/python/rpa/bert-base-chinese')
def get_bert_embeddings(text):
    """Return the BERT sentence embedding for *text*.

    The text is tokenized (truncated to 512 tokens), passed through the
    model without gradient tracking, and the hidden state of the first
    token ([CLS]) of the last layer is returned as a (1, hidden_size)
    tensor representing the whole sentence.
    """
    encoded = tokenizer(
        text, return_tensors="pt", padding=True, truncation=True, max_length=512
    )
    with torch.no_grad():
        output = model(**encoded)
    # [CLS] is always the first position; use it as the sentence vector.
    return output.last_hidden_state[:, 0, :]
# Semantic similarity between two texts via their BERT [CLS] embeddings.
def calculate_similarity(text1, text2):
    """Return the cosine similarity (float) of the two texts' embeddings."""
    first = get_bert_embeddings(text1)
    second = get_bert_embeddings(text2)
    # Reshape both to (batch, 1, hidden) and compare along the hidden axis.
    score = cosine_similarity(first.unsqueeze(1), second.unsqueeze(1), dim=2)
    return score.item()
# Entry point: compute and report the similarity of two texts.
def main(text1, text2):
    """Print the semantic similarity score for the given pair of texts."""
    similarity = calculate_similarity(text1, text2)
    print(f"The semantic similarity between the texts is: {similarity}")
if __name__ == "__main__":
    # Guard the demo so importing this module does not trigger model inference.
    # Example: two paraphrases that should score as highly similar.
    text1 = '我的身体很健康'
    text2 = '我没有生病'
    main(text1, text2)
bash
# result
The semantic similarity between the texts is: 0.8934338092803955