Using Hugging Face text embedding models

python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
import os
import time
os.environ["DASHSCOPE_API_KEY"] = "your-dashscope-api-key"  # replace with your own key
llm = Tongyi()

from langchain_community.document_loaders import UnstructuredURLLoader

embeddings = HuggingFaceEmbeddings()  # defaults to sentence-transformers/all-mpnet-base-v2

# Record the start time
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

end_time = time.time()
# Compute and print the execution time
execution_time = end_time - start_time
print(f"Execution time: {execution_time} seconds")
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)
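# Note: when no text_splitter is passed, load_and_split() falls back to a
# RecursiveCharacterTextSplitter; pass your own splitter to control chunk size and overlap.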


# # First run: build the index and save it locally
# vectorstore = FAISS.from_documents(documents, embeddings)
# vectorstore.save_local("faiss_index2")


# Record the start time
start_time = time.time()

# Load the index from local storage
vectorstore = FAISS.load_local("faiss_index2", embeddings)
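# Note: recent langchain-community releases require explicitly opting in to
# pickle deserialization when loading a saved index, roughly:
# vectorstore = FAISS.load_local(
#     "faiss_index2", embeddings, allow_dangerous_deserialization=True
# )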

retriever = vectorstore.as_retriever()
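# Optional: limit how many chunks the retriever returns per query, e.g.
# retriever = vectorstore.as_retriever(search_kwargs={"k": 4})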
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided, answer
with "I don't know".

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
# Compute and print the execution time
end_time = time.time()
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")

The code above uses the default model. The following specifies all-MiniLM-L6-v2 explicitly:

python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
import os
import time
os.environ["DASHSCOPE_API_KEY"] = "your-dashscope-api-key"  # replace with your own key
llm = Tongyi()

from langchain_community.document_loaders import UnstructuredURLLoader
model_name = "all-MiniLM-L6-v2"  # resolved as sentence-transformers/all-MiniLM-L6-v2
embeddings = HuggingFaceEmbeddings(
    model_name=model_name,
)

# Record the start time
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

end_time = time.time()
# Compute and print the execution time
execution_time = end_time - start_time
print(f"Execution time: {execution_time} seconds")
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)


# Record the start time
start_time = time.time()

# First run: build the index and save it locally
vectorstore = FAISS.from_documents(documents, embeddings)
vectorstore.save_local("faiss_index2")


# # Load the index from local storage on subsequent runs
# vectorstore = FAISS.load_local("faiss_index2", embeddings)

retriever = vectorstore.as_retriever()
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided, answer
with "I don't know".

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
# Compute and print the execution time
end_time = time.time()
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")

For the models that can be used, see here.
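
Beyond swapping the model name, HuggingFaceEmbeddings also accepts model_kwargs and encode_kwargs. Below is a minimal sketch (the settings shown are illustrative assumptions, not requirements) that pins the device and normalizes the output vectors:

python
from langchain_community.embeddings import HuggingFaceEmbeddings

# Illustrative configuration; any sentence-transformers model name works here.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",
    model_kwargs={"device": "cpu"},               # use "cuda" if a GPU is available
    encode_kwargs={"normalize_embeddings": True}, # unit-length vectors, handy for cosine similarity
)
print(len(embeddings.embed_query("hello")))       # 384-dimensional output for this model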
