使用huggingface的text embedding models

python 代码如下:
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
import os
import time
os.environ["DASHSCOPE_API_KEY"] = "sk-cc1c8314fdbd43ceaf26ec1824d5dd3b"
llm = Tongyi()

from langchain_community.document_loaders import UnstructuredURLLoader

embeddings = HuggingFaceEmbeddings()

# 记录开始时间
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

end_time = time.time()
# 计算并打印函数执行时间
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)


# # 第一次存入本地
# vectorstore = FAISS.from_documents(documents, embeddings)
# vectorstore.save_local("faiss_index2")


# 记录开始时间
start_time = time.time()

# # 从本地加载
vectorstore = FAISS.load_local("faiss_index2", embeddings)

retriever = vectorstore.as_retriever()
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided answer
with "I don't know"     

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
# 计算并打印函数执行时间
end_time = time.time()
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")

上面使用的是默认模型;下面的示例则显式指定使用 all-MiniLM-L6-v2:

python 代码如下:
"""RAG demo (explicit embedding model).

Same pipeline as the previous script, but the embedding model is pinned to
all-MiniLM-L6-v2 (a small, fast sentence-transformers model). This variant
builds the FAISS index from a Wikipedia page and persists it to disk.
"""
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.document_loaders import UnstructuredURLLoader
import os
import time

# SECURITY: never hardcode API keys in source. `setdefault` keeps the demo
# runnable but lets a real DASHSCOPE_API_KEY from the environment win.
# NOTE(review): this key has been published and must be rotated/revoked.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-cc1c8314fdbd43ceaf26ec1824d5dd3b")
llm = Tongyi()

# Short names are resolved by sentence-transformers; the full id is
# sentence-transformers/all-MiniLM-L6-v2.
model_name = "all-MiniLM-L6-v2"
embeddings = HuggingFaceEmbeddings(
    model_name=model_name,
)

# Time a single query embedding (first call also pays model download/load cost).
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

execution_time = time.time() - start_time
print(f"函数执行时间: {execution_time} 秒")
# Show only the first 3 dimensions of the embedding vector.
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)


# Time index build + retrieval + LLM answer.
start_time = time.time()

# First run: build the index and persist it for later reuse.
vectorstore = FAISS.from_documents(documents, embeddings)
vectorstore.save_local("faiss_index2")


# Subsequent runs could load the persisted index instead:
# vectorstore = FAISS.load_local("faiss_index2", embeddings)

retriever = vectorstore.as_retriever()
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided answer
with "I don't know"     

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
# Feed the retriever output as {context} and the raw question as {question}.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
execution_time = time.time() - start_time
print(f"函数执行时间: {execution_time} 秒")

关于可以使用的模型,可以查阅 Hugging Face 上 sentence-transformers 系列的模型列表。

相关推荐
ASS-ASH2 天前
AI时代之向量数据库概览
数据库·人工智能·python·llm·embedding·向量数据库·vlm
玄同7657 天前
LangChain v1.0+ Retrieval模块完全指南:从文档加载到RAG实战
人工智能·langchain·知识图谱·embedding·知识库·向量数据库·rag
Loo国昌7 天前
【垂类模型数据工程】第四阶段:高性能 Embedding 实战:从双编码器架构到 InfoNCE 损失函数详解
人工智能·后端·深度学习·自然语言处理·架构·transformer·embedding
自己的九又四分之三站台13 天前
8:大语言模型是无状态以及大语言模型的基石Embedding
人工智能·语言模型·embedding
laplace012313 天前
大模型整个训练流程
人工智能·深度学习·embedding·agent·rag
汗流浃背了吧,老弟!13 天前
构建RAG系统时,如何选择合适的嵌入模型(Embedding Model)?
人工智能·python·embedding
Philtell15 天前
Diffusion Model扩散模型中的time embeding的作用
embedding
zhangfeng113315 天前
大语言模型 bpe算法 后面对接的是 one-hot吗 nn.Embedding
算法·语言模型·embedding
andwhataboutit?15 天前
embedding model
embedding
程序员泠零澪回家种桔子18 天前
RAG中的Embedding技术
人工智能·后端·ai·embedding