使用 HuggingFace 的 text embedding models。

以下为 Python 示例代码:
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.document_loaders import UnstructuredURLLoader
import os
import time

# SECURITY: never hardcode API keys in source code — the original embedded a
# real-looking DashScope key. Read it from the environment instead.
if "DASHSCOPE_API_KEY" not in os.environ:
    raise RuntimeError("Please set the DASHSCOPE_API_KEY environment variable")
llm = Tongyi()

# Use the library's default HuggingFace embedding model (no model_name given).
embeddings = HuggingFaceEmbeddings()

# Time a single query embedding to gauge embedding latency.
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

end_time = time.time()
# Compute and print the elapsed time.
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)


# # First run: build the index from the documents and persist it locally.
# vectorstore = FAISS.from_documents(documents, embeddings)
# vectorstore.save_local("faiss_index2")


# Time the load-index + RAG-query path.
start_time = time.time()

# Subsequent runs: load the persisted index from disk.
# NOTE: load_local deserializes a pickle file — only load indexes you created
# yourself. Newer langchain-community versions require opting in explicitly.
vectorstore = FAISS.load_local(
    "faiss_index2", embeddings, allow_dangerous_deserialization=True
)

retriever = vectorstore.as_retriever()
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided answer
with "I don't know"     

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
# Fan out in parallel: the retriever fills {context}, the raw question
# passes through unchanged into {question}.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
# Compute and print the elapsed time.
end_time = time.time()
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")

上面使用的是默认的模型,以下指定使用 all-MiniLM-L6-v2:

以下为 Python 示例代码:
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain_community.llms import Tongyi
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.document_loaders import UnstructuredURLLoader
import os
import time

# SECURITY: never hardcode API keys in source code — the original embedded a
# real-looking DashScope key. Read it from the environment instead.
if "DASHSCOPE_API_KEY" not in os.environ:
    raise RuntimeError("Please set the DASHSCOPE_API_KEY environment variable")
llm = Tongyi()

# Explicitly select the all-MiniLM-L6-v2 sentence-transformers model instead
# of the library default.
model_name = "all-MiniLM-L6-v2"
embeddings = HuggingFaceEmbeddings(
         model_name=model_name,
)

# Time a single query embedding to gauge embedding latency.
start_time = time.time()
text = "This is a test document."

query_result = embeddings.embed_query(text)

end_time = time.time()
# Compute and print the elapsed time.
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")
print(query_result[:3])


urls = [
    "https://en.wikipedia.org/wiki/Android_(operating_system)"
]

loader = UnstructuredURLLoader(urls=urls)
documents = loader.load_and_split()
# print(documents)


# Time the build-index + RAG-query path.
start_time = time.time()

# First run: build the index from the documents and persist it locally.
vectorstore = FAISS.from_documents(documents, embeddings)
vectorstore.save_local("faiss_index2")


# # Subsequent runs: load the persisted index from disk instead.
# vectorstore = FAISS.load_local("faiss_index2", embeddings)

retriever = vectorstore.as_retriever()
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided answer
with "I don't know"     

Context: {context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

output_parser = StrOutputParser()
# Fan out in parallel: the retriever fills {context}, the raw question
# passes through unchanged into {question}.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
print(chain.invoke("what is android"))
# Compute and print the elapsed time.
end_time = time.time()
execution_time = end_time - start_time
print(f"函数执行时间: {execution_time} 秒")

关于可以使用的模型,可以参阅 HuggingFace 上 sentence-transformers 组织的模型列表。

相关推荐
鹏子训2 天前
AI记忆新思路:用SQLite替代向量数据库,去EMBEDDINGS化,谷歌开源Google Always On Memory Agent
数据库·人工智能·sqlite·embedding
马优晨3 天前
大语言模型(LLM)、Embedding 模型、reranker重排序模型 有什么关系
人工智能·语言模型·embedding·embedding 模型·大语言模型(llm)
猫头虎4 天前
如何搭建 24 小时 AI 直播平台:魔珐星云数字人打造无人值守 “AI 销冠” 全流程实战教程
人工智能·langchain·开源·prompt·aigc·embedding·agi
ydmy7 天前
Embedding层(个人理解)
python·深度学习·embedding
西西弗Sisyphus8 天前
Transformer 嵌入层 nn.Embedding 到底是什么?
深度学习·transformer·embedding
城管不管9 天前
嵌入模型Embedding Model
java·开发语言·python·embedding·嵌入模型
你不是我我10 天前
【Java 开发日记】向量检索的流程是怎样的?Embedding 和 Rerank 各自的作用?
embedding
j_xxx404_11 天前
【AI大模型入门(三)】大模型API接入、Ollama本地部署与RAG核心(Embedding)
人工智能·ai·embedding
算.子13 天前
【Spring AI 实战】七、Embedding 向量化与向量数据库选型对比
人工智能·spring·embedding
rising start16 天前
RAG入门与在Dify中的简单实践
embedding·dify·rag