Notes on langchain-chroma and chromadb

chromadb can be used on its own or together with the langchain framework.

Environment:

python 3.9

langchain=0.2.16

chromadb=0.5.3
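
To confirm the environment matches, the installed versions can be printed. A small sketch; the `__version__` attributes are standard for both packages, but this check is not part of the original notes:

python
import langchain
import chromadb

# Expected for these notes: langchain 0.2.16, chromadb 0.5.3
print(langchain.__version__)
print(chromadb.__version__)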

chromadb usage example

python
import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions


# Load the embedding model
en_embedding_name = "/home/model/peft_prac/all-MiniLM-L6-v2"
ef = embedding_functions.SentenceTransformerEmbeddingFunction(
    en_embedding_name, "cuda:2", True  # model path, device, normalize_embeddings
)

# Instantiate a chromadb client and create a collection
collection_first = 'coll_1st'
client_test = chromadb.Client()
collection = client_test.create_collection(name=collection_first, embedding_function=ef) 


# Add data as three parallel lists: documents, metadatas, ids
collection.add(
    documents=["it's an apple", "this is a book"], 
    metadatas=[{"source": "t4"},  {"source": "t5"}], 
    ids=["id4",  "id5"])

# Count the number of items in the collection
collection.count()

# Look up data
coll2 = client_test.get_collection(collection_first)
print('check_collection',  coll2.peek(1)) # peek at the first item; embeddings are included
print('check_collection',  coll2.get(ids=["id4"])) # get by id; embeddings are not included by default
collection = client_test.get_or_create_collection("testname") # get the collection if it exists, otherwise create it
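# Assumed addition (not in the original notes): get() omits embeddings by default,
# but they can be requested explicitly via the include parameter
print('check_collection', coll2.get(ids=["id4"], include=["embeddings", "documents", "metadatas"]))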

# Update data (upsert: update if the id exists, otherwise insert)
collection.upsert(
    ids=["id4", ...],
    embeddings=[[1.1, 2.3, 3.2], ...], # optional
    metadatas=[{"chapter": "3", "verse": "16"}, ...],
    documents=["it's a book", ...],
)
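
# Assumed reconstruction (not spelled out in the original notes) of the concrete upsert
# that produces the post-update query output shown further below: id4 is overwritten
# with a new document and an extra metadata field
collection.upsert(
    ids=["id4"],
    documents=["it's a book"],
    metadatas=[{"source": "t4", "info": "new data"}],
)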

# Query by embedding (illustrative values; the real vectors produced by all-MiniLM-L6-v2 are 384-dimensional)

collection.query(
    query_embeddings=[[1.1, 2.3, 3.2]],
    n_results=1,
    where={"style": "style2"}
)
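
# Assumed illustration (not in the original notes): the where filter can also target the
# metadata actually stored in this example (the "source" key from the add() call above)
print('filtered_search', collection.query(query_texts=["this is a book"], n_results=1, where={"source": "t5"}))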

# Query by text (run against the data before the upsert); the smaller the distance, the closer the semantics
print('chromadb_search', coll2.query(query_texts="it's a book", n_results=2))
output:
chromadb_search {'ids': [['id5', 'id4']], 'distances': [[0.3473210334777832, 1.2127960920333862]], 'metadatas': [[{'source': 't5'}, {'source': 't4'}]], 'embeddings': None, 'documents': [['this is a book', "it's an apple"]], 'uris': None, 'data': None, 'included': ['metadatas', 'documents', 'distances']}

# Query by text (run against the data after the upsert); note: even when querying with the stored text itself, the distance is ~1e-12 rather than exactly 0
print('chromadb_search', coll2.query(query_texts="it's a book", n_results=1))
output:
chromadb_search {'ids': [['id4']], 'distances': [[1.168771351402198e-12]], 'metadatas': [[{'info': 'new data', 'source': 't4'}]], 'embeddings': None, 'documents': [["it's a book"]], 'uris': None, 'data': None, 'included': ['metadatas', 'documents', 'distances']}
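
The distance reported by query() depends on the collection's distance metric. As a side note not covered in the original notes, chromadb defaults to (squared) L2 distance, and the metric can be chosen when the collection is created; a minimal sketch:

python
# Assumed illustration: pick the distance metric via collection metadata at creation time
collection_cos = client_test.create_collection(
    name='coll_cosine',
    embedding_function=ef,
    metadata={"hnsw:space": "cosine"},  # alternatives: "l2" (default), "ip"
)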

langchain-chroma usage example

python
import chromadb
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_chroma import Chroma


# Load the embedding model
en_embedding_name = "/home/zmh/peft_prac/all-MiniLM-L6-v2"  
embeddings = HuggingFaceEmbeddings(
    model_name = en_embedding_name,
    model_kwargs={"device": "cuda:1"}
)

# Create the db (persist_directory can also save it locally)
collection_test = 'llama2_demo'
client_test = chromadb.Client()  # in-memory chromadb client, as in the example above, so this snippet runs standalone
db = Chroma(
    client=client_test, 
    collection_name=collection_test,
    embedding_function=embeddings, 
    persist_directory='db/'
)
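
# Assumed alternative (not in the original notes): omit `client` and pass only
# persist_directory, in which case langchain-chroma creates a persistent client itself
# (db_alt and 'db_alt/' are hypothetical names for illustration)
db_alt = Chroma(
    collection_name=collection_test,
    embedding_function=embeddings,
    persist_directory='db_alt/'
)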

# Raw example data
student_info = "Alexandra Thompson, a 19-year-old computer science sophomore with a 3.7 GPA, is a member of the programming and chess clubs who enjoys pizza, swimming, and hiking in her free time in hopes of working at a tech company after graduating from the University of Washington."

club_info = "The university chess club provides an outlet for students to come together and enjoy playing the classic strategy game of chess. Members of all skill levels are welcome, from beginners learning the rules to experienced tournament players. The club typically meets a few times per week to play casual games participate in tournaments, analyze famous chess matches, and improve members' skills."

university_info = "The University of Washington, founded in 1861 in Seattle, is a public research university with over 45,000 students across three campuses in Seattle, Tacoma, and Bothell. As the flagship institution of the six public universities in Washington state, UW encompasses over 500 buildings and 20 million square feet of space, including one of the largest library systems in the world."


texts_org = [student_info, club_info, university_info]
text_meta = [{"source": 'student_info'},  {"source": 'club_info'},  {"source": 'university_info'}]
text_ids = ['101',  '102',  '103']

# Process the data: split the texts into Document chunks
text_splitter = CharacterTextSplitter(separator='.', chunk_size=1000, chunk_overlap=0)
texts_documents = text_splitter.create_documents(texts_org, metadatas=text_meta)
# Add the documents to the db
db.add_documents(texts_documents, ids=text_ids)

# Inspect the data
coll = db._collection
print('coll', type(coll), coll.name, coll.metadata)
output:
coll <class 'chromadb.api.models.Collection.Collection'> llama2_demo None
print('sample of db_info',  coll.peek(1)) # peek at the first item
print("collection_info", coll.get()) # get all items in the collection


# Similarity search: returns the matching Document objects directly, without distance scores
res = db.similarity_search("What is the student name?", k=2)
print('res',  res)
output:
res [Document(metadata={'source': 'student_info'}, page_content='Alexandra Thompson, a 19-year-old computer science sophomore with a 3.7 GPA, is a member of the programming and chess clubs who enjoys pizza, swimming, and hiking in her free time in hopes of working at a tech company after graduating from the University of Washington'), Document(metadata={'source': 'club_info'}, page_content="The university chess club provides an outlet for students to come together and enjoy playing the classic strategy game of chess. Members of all skill levels are welcome, from beginners learning the rules to experienced tournament players. The club typically meets a few times per week to play casual games participate in tournaments, analyze famous chess matches, and improve members' skills")]
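
similarity_search only returns the documents; when the distance score is also needed, langchain-chroma provides similarity_search_with_score, which yields (Document, score) pairs. A minimal sketch (the loop and printed fields are illustrative, not from the original notes):

python
# Same query as above, but returning (Document, score) pairs
res_scored = db.similarity_search_with_score("What is the student name?", k=2)
for doc, score in res_scored:
    print(score, doc.metadata.get("source"))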

