1. Hands-On with the OpenAI Embedding API
1. OpenAI embedding model example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/25 11:17
@Author : thezehui@gmail.com
@File : 01. OpenAI嵌入模型示例.py
"""
import dotenv
import numpy as np
from langchain_openai import OpenAIEmbeddings
from numpy.linalg import norm
dotenv.load_dotenv()
def cosine_similarity(vec1: list, vec2: list) -> float:
    """Compute the cosine similarity of the two given vectors."""
    # 1. Compute the dot product of the two vectors
    dot_product = np.dot(vec1, vec2)
    # 2. Compute the norm (length) of each vector
    vec1_norm = norm(vec1)
    vec2_norm = norm(vec2)
    # 3. Compute the cosine similarity
    return dot_product / (vec1_norm * vec2_norm)
# 1. Create the text embedding model
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
# 2. Embed a query string
query_vector = embeddings.embed_query("我叫慕小课,我喜欢打篮球")
print(query_vector)
print(len(query_vector))
# 3. Embed a list of documents/strings
documents_vector = embeddings.embed_documents([
"我叫慕小课,我喜欢打篮球",
"这个喜欢打篮球的人叫慕小课",
"求知若渴,虚心若愚"
])
print(len(documents_vector))
# 4. Compute cosine similarities
print("Similarity of vector 1 and vector 2:", cosine_similarity(documents_vector[0], documents_vector[1]))
print("Similarity of vector 1 and vector 3:", cosine_similarity(documents_vector[0], documents_vector[2]))
2. Using the CacheBackedEmbeddings Component and Its Caveats
1. CacheBackedEmbeddings usage example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/25 13:36
@Author : thezehui@gmail.com
@File : 01.CacheBackEmbedding使用示例.py
"""
import dotenv
import numpy as np
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_openai import OpenAIEmbeddings
from numpy.linalg import norm
dotenv.load_dotenv()
def cosine_similarity(vector1: list, vector2: list) -> float:
    """Compute the cosine similarity of the two given vectors."""
    # 1. Compute the inner/dot product
    dot_product = np.dot(vector1, vector2)
    # 2. Compute the norm (length) of each vector
    norm_vec1 = norm(vector1)
    norm_vec2 = norm(vector2)
    # 3. Compute the cosine similarity
    return dot_product / (norm_vec1 * norm_vec2)
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
embeddings_with_cache = CacheBackedEmbeddings.from_bytes_store(
embeddings,
LocalFileStore("./cache/"),
namespace=embeddings.model,
query_embedding_cache=True,
)
query_vector = embeddings_with_cache.embed_query("你好,我是慕小课,我喜欢打篮球")
documents_vector = embeddings_with_cache.embed_documents([
"你好,我是慕小课,我喜欢打篮球",
"这个喜欢打篮球的人叫慕小课",
"求知若渴,虚心若愚"
])
print(query_vector)
print(len(query_vector))
print("============")
print(len(documents_vector))
print("Cosine similarity of vector 1 and vector 2:", cosine_similarity(documents_vector[0], documents_vector[1]))
print("Cosine similarity of vector 1 and vector 3:", cosine_similarity(documents_vector[0], documents_vector[2]))
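A quick way to check that the cache is actually working (this snippet is not part of the original script): time the same embed_query call twice. Because query_embedding_cache=True, the second call should be answered from the LocalFileStore instead of the OpenAI API. The timings are illustrative only.
python
import time
# A string that has not yet been embedded in this cache namespace.
text = "这是一条新的测试文本"
start = time.perf_counter()
embeddings_with_cache.embed_query(text)
print("First call (hits the API):", time.perf_counter() - start)
start = time.perf_counter()
embeddings_with_cache.embed_query(text)
print("Second call (served from the cache):", time.perf_counter() - start)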
3. Configuring and Using Other Embedding Models
1. Hugging Face local embedding model
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/26 14:51
@Author : thezehui@gmail.com
@File : 01.Hugging Face本地嵌入模型.py
"""
from langchain_huggingface import HuggingFaceEmbeddings
embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L12-v2",
cache_folder="./embeddings/"
)
query_vector = embeddings.embed_query("你好,我是慕小课,我喜欢打篮球游泳")
print(query_vector)
print(len(query_vector))
2. Hugging Face remote-inference embedding model
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/26 15:36
@Author : thezehui@gmail.com
@File : 02.HuggingFace远程推理嵌入模型.py
"""
import dotenv
from langchain_huggingface import HuggingFaceEndpointEmbeddings
dotenv.load_dotenv()
embeddings = HuggingFaceEndpointEmbeddings(model="sentence-transformers/all-MiniLM-L12-v2")
query_vector = embeddings.embed_query("你好,我是慕小课,我喜欢打篮球游泳")
print(query_vector)
print(len(query_vector))
3. Baidu Qianfan text embedding model
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/26 15:53
@Author : thezehui@gmail.com
@File : 03.百度千帆文本嵌入模型.py
"""
import dotenv
from langchain_community.embeddings.baidu_qianfan_endpoint import QianfanEmbeddingsEndpoint
dotenv.load_dotenv()
embeddings = QianfanEmbeddingsEndpoint()
query_vector = embeddings.embed_query("我叫慕小课,我喜欢打篮球游泳")
print(query_vector)
print(len(query_vector))
4. Configuring and Using the Faiss Vector Database
1. Faiss vector database usage example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/28 17:13
@Author : thezehui@gmail.com
@File : 1.faiss向量数据库使用示例.py
"""
import dotenv
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
dotenv.load_dotenv()
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
db = FAISS.load_local("./vector-store/", embedding, allow_dangerous_deserialization=True)
print(db.similarity_search_with_score("我养了一只猫,叫笨笨"))
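The script above assumes a Faiss index was previously persisted to ./vector-store/. A minimal sketch of how such an index could be created, assuming the faiss-cpu package is installed (the sample texts are placeholders, not the course's original data):
python
import dotenv
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
dotenv.load_dotenv()
# Build a small index from sample texts and persist it so that
# FAISS.load_local("./vector-store/", ...) above has something to load.
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
db = FAISS.from_texts(["笨笨是一只很喜欢睡觉的猫咪", "我的狗喜欢追逐球,看起来非常开心。"], embedding)
db.save_local("./vector-store/")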
5. Configuring and Using the Pinecone Vector Database
1. Pinecone vector database usage example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/29 23:06
@Author : thezehui@gmail.com
@File : 1.Pinecone向量数据库使用示例.py
"""
import dotenv
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
dotenv.load_dotenv()
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
texts: list = [
"笨笨是一只很喜欢睡觉的猫咪",
"我喜欢在夜晚听音乐,这让我感到放松。",
"猫咪在窗台上打盹,看起来非常可爱。",
"学习新技能是每个人都应该追求的目标。",
"我最喜欢的食物是意大利面,尤其是番茄酱的那种。",
"昨晚我做了一个奇怪的梦,梦见自己在太空飞行。",
"我的手机突然关机了,让我有些焦虑。",
"阅读是我每天都会做的事情,我觉得很充实。",
"他们一起计划了一次周末的野餐,希望天气能好。",
"我的狗喜欢追逐球,看起来非常开心。",
]
metadatas: list = [
{"page": 1},
{"page": 2},
{"page": 3},
{"page": 4},
{"page": 5},
{"page": 6, "account_id": 1},
{"page": 7},
{"page": 8},
{"page": 9},
{"page": 10},
]
db = PineconeVectorStore(index_name="llmops", embedding=embedding, namespace="dataset")
# db.add_texts(texts, metadatas, namespace="dataset")
query = "我养了一只猫,叫笨笨"
print(db.similarity_search_with_relevance_scores(query))
2. Pinecone similarity search with filtering
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/29 8:12
@Author : thezehui@gmail.com
@File : 2.Pinecone带过滤的相似性搜索.py
"""
import dotenv
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
dotenv.load_dotenv()
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
texts: list = [
"笨笨是一只很喜欢睡觉的猫咪",
"我喜欢在夜晚听音乐,这让我感到放松。",
"猫咪在窗台上打盹,看起来非常可爱。",
"学习新技能是每个人都应该追求的目标。",
"我最喜欢的食物是意大利面,尤其是番茄酱的那种。",
"昨晚我做了一个奇怪的梦,梦见自己在太空飞行。",
"我的手机突然关机了,让我有些焦虑。",
"阅读是我每天都会做的事情,我觉得很充实。",
"他们一起计划了一次周末的野餐,希望天气能好。",
"我的狗喜欢追逐球,看起来非常开心。",
]
metadatas: list = [
{"page": 1},
{"page": 2},
{"page": 3},
{"page": 4},
{"page": 5},
{"page": 6, "account_id": 1},
{"page": 7},
{"page": 8},
{"page": 9},
{"page": 10},
]
db = PineconeVectorStore(index_name="llmops", embedding=embedding, namespace="dataset")
# db.add_texts(texts, metadatas, namespace="dataset")
query = "我养了一只猫,叫笨笨"
print(db.similarity_search_with_relevance_scores(
query,
filter={"$or": [{"page": 5}, {"account_id": 1}]}
# filter={"page": {"$gte": 5}}
))
3. Deleting data from Pinecone
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/17 9:18
@Author : thezehui@gmail.com
@File : 3.Pinecone删除数据.py
"""
import dotenv
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
dotenv.load_dotenv()
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
db = PineconeVectorStore(index_name="llmops", embedding=embedding, namespace="dataset")
document_id = "23cb7d6f-f77d-4465-8634-9c1ca7f93895"
db.delete([document_id], namespace="dataset")
# pinecone_index = db.get_pinecone_index("llmops")
# pinecone_index.update(id="xxx", values=[], metadata={}, namespace="xxx")
6. Configuring and Using the TCVectorDB Vector Database
1. TCVectorDB built-in embedding example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/29 16:05
@Author : thezehui@gmail.com
@File : 1.TCVectorDB内置Embedding使用示例.py
"""
import os
import dotenv
from langchain_community.vectorstores import TencentVectorDB
from langchain_community.vectorstores.tencentvectordb import (
ConnectionParams,
)
dotenv.load_dotenv()
db = TencentVectorDB(
embedding=None,
connection_params=ConnectionParams(
url=os.environ.get("TC_VECTOR_DB_URL"),
username=os.environ.get("TC_VECTOR_DB_USERNAME"),
key=os.environ.get("TC_VECTOR_DB_KEY"),
timeout=int(os.environ.get("TC_VECTOR_DB_TIMEOUT")),
),
database_name="llmops-test",
collection_name="dataset-builtin",
)
texts = [
"笨笨是一只很喜欢睡觉的猫咪",
"我喜欢在夜晚听音乐,这让我感到放松。",
"猫咪在窗台上打盹,看起来非常可爱。",
"学习新技能是每个人都应该追求的目标。",
"我最喜欢的食物是意大利面,尤其是番茄酱的那种。",
"昨晚我做了一个奇怪的梦,梦见自己在太空飞行。",
"我的手机突然关机了,让我有些焦虑。",
"阅读是我每天都会做的事情,我觉得很充实。",
"他们一起计划了一次周末的野餐,希望天气能好。",
"我的狗喜欢追逐球,看起来非常开心。",
]
ids = db.add_texts(texts)
print("Added document IDs:", ids)
print(db.similarity_search_with_relevance_scores("我养了一只猫,叫笨笨"))
2. TCVectorDB external embedding example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/29 14:39
@Author : thezehui@gmail.com
@File : 2.TCVectorDB外部Embedding模式示例.py
"""
import os
import dotenv
from langchain_community.vectorstores import TencentVectorDB
from langchain_community.vectorstores.tencentvectordb import (
ConnectionParams
)
from langchain_openai import OpenAIEmbeddings
dotenv.load_dotenv()
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
db = TencentVectorDB(
embedding=embedding,
connection_params=ConnectionParams(
url=os.environ.get("TC_VECTOR_DB_URL"),
username=os.environ.get("TC_VECTOR_DB_USERNAME"),
key=os.environ.get("TC_VECTOR_DB_KEY"),
timeout=int(os.environ.get("TC_VECTOR_DB_TIMEOUT")),
),
database_name=os.environ.get("TC_VECTOR_DB_DATABASE"),
collection_name="dataset-external",
# meta_fields=[
# MetaField(name="text", data_type=META_FIELD_TYPE_STRING),
# ]
)
texts = [
"笨笨是一只很喜欢睡觉的猫咪",
"我喜欢在夜晚听音乐,这让我感到放松。",
"猫咪在窗台上打盹,看起来非常可爱。",
"学习新技能是每个人都应该追求的目标。",
"我最喜欢的食物是意大利面,尤其是番茄酱的那种。",
"昨晚我做了一个奇怪的梦,梦见自己在太空飞行。",
"我的手机突然关机了,让我有些焦虑。",
"阅读是我每天都会做的事情,我觉得很充实。",
"他们一起计划了一次周末的野餐,希望天气能好。",
"我的狗喜欢追逐球,看起来非常开心。",
]
# metadatas = [{"text": text, "page": index} for index, text in enumerate(texts)]
ids = db.add_texts(texts)
print("Added document IDs:", ids)
print(db.similarity_search_with_score("我养了一只猫,叫笨笨"))
3. TCVectorDB similarity search with filtering
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/29 16:30
@Author : thezehui@gmail.com
@File : 3.TCVectorDB带过滤的相似性搜索.py
"""
import os
import dotenv
from langchain_community.vectorstores import TencentVectorDB
from langchain_community.vectorstores.tencentvectordb import (
ConnectionParams,
MetaField,
META_FIELD_TYPE_UINT64,
)
from langchain_openai import OpenAIEmbeddings
dotenv.load_dotenv()
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
db = TencentVectorDB(
embedding=None,
connection_params=ConnectionParams(
url=os.environ.get("TC_VECTOR_DB_URL"),
username=os.environ.get("TC_VECTOR_DB_USERNAME"),
key=os.environ.get("TC_VECTOR_DB_KEY"),
timeout=int(os.environ.get("TC_VECTOR_DB_TIMEOUT")),
),
database_name=os.environ.get("TC_VECTOR_DB_DATABASE"),
collection_name="dataset-filter",
meta_fields=[
MetaField(name="page", data_type=META_FIELD_TYPE_UINT64),
]
)
texts = [
"笨笨是一只很喜欢睡觉的猫咪",
"我喜欢在夜晚听音乐,这让我感到放松。",
"猫咪在窗台上打盹,看起来非常可爱。",
"学习新技能是每个人都应该追求的目标。",
"我最喜欢的食物是意大利面,尤其是番茄酱的那种。",
"昨晚我做了一个奇怪的梦,梦见自己在太空飞行。",
"我的手机突然关机了,让我有些焦虑。",
"阅读是我每天都会做的事情,我觉得很充实。",
"他们一起计划了一次周末的野餐,希望天气能好。",
"我的狗喜欢追逐球,看起来非常开心。",
]
metadatas = [
{"page": 1},
{"page": 2},
{"page": 3},
{"page": 4},
{"page": 5},
{"page": 6, "account_id": 1},
{"page": 7},
{"page": 8},
{"page": 9},
{"page": 10},
]
ids = db.add_texts(texts, metadatas)
print("Added document IDs:", ids)
print(db.similarity_search_with_score("我养了一只猫,叫笨笨", expr="page>=9"))
7. Configuring and Using the Weaviate Vector Database
1. Weaviate embedding vector database example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/29 22:36
@Author : thezehui@gmail.com
@File : 1.weaviate嵌入向量数据库示例.py
"""
import dotenv
import weaviate
from langchain_openai import OpenAIEmbeddings
from langchain_weaviate import WeaviateVectorStore
from weaviate.classes.query import Filter
dotenv.load_dotenv()
# 1. Raw text data and metadata
texts = [
"笨笨是一只很喜欢睡觉的猫咪",
"我喜欢在夜晚听音乐,这让我感到放松。",
"猫咪在窗台上打盹,看起来非常可爱。",
"学习新技能是每个人都应该追求的目标。",
"我最喜欢的食物是意大利面,尤其是番茄酱的那种。",
"昨晚我做了一个奇怪的梦,梦见自己在太空飞行。",
"我的手机突然关机了,让我有些焦虑。",
"阅读是我每天都会做的事情,我觉得很充实。",
"他们一起计划了一次周末的野餐,希望天气能好。",
"我的狗喜欢追逐球,看起来非常开心。",
]
metadatas = [
{"page": 1},
{"page": 2},
{"page": 3},
{"page": 4},
{"page": 5},
{"page": 6, "account_id": 1},
{"page": 7},
{"page": 8},
{"page": 9},
{"page": 10},
]
# 2. Create the connection client (port is an int in the v4 client API)
client = weaviate.connect_to_local(host="192.168.2.120", port=8080)
# client = weaviate.connect_to_wcs(
# cluster_url="https://eftofnujtxqcsa0sn272jw.c0.us-west3.gcp.weaviate.cloud",
# auth_credentials=AuthApiKey("21pzYy0orl2dxH9xCoZG1O2b0euDeKJNEbB0"),
# )
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
# 3. Create the LangChain vector store instance
db = WeaviateVectorStore(
client=client,
index_name="Dataset",
text_key="text",
embedding=embedding,
)
# 4. Add the data
ids = db.add_texts(texts, metadatas)
print(ids)
# 5. Run a similarity search
filters = Filter.by_property("page").greater_or_equal(5)
print(db.similarity_search_with_score("笨笨", filters=filters))
retriever = db.as_retriever()
print(retriever.invoke("笨笨"))
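One housekeeping detail the script leaves out: the Weaviate v4 client keeps network connections open and warns on interpreter exit if they are never released, so it is worth closing the client at the end. A small addition, not in the original file:
python
# Release the client's underlying connections once the store is no longer needed.
client.close()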
8. Integrating a Custom Vector Database
1. Custom vector database integration example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/6/30 8:12
@Author : thezehui@gmail.com
@File : 1.对接自定义向量数据库示例.py
"""
import uuid
from typing import List, Optional, Any, Iterable, Type
import dotenv
import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from langchain_openai import OpenAIEmbeddings
class MemoryVectorStore(VectorStore):
    """An in-memory vector store based on Euclidean distance."""
    store: dict = {}  # temporary class-level storage for the records
def __init__(self, embedding: Embeddings):
self._embedding = embedding
    def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) -> List[str]:
        """Add the given texts (and optional metadata) to the vector store."""
        # 1. Validate the metadata format
        if metadatas is not None and len(metadatas) != len(texts):
            raise ValueError("metadatas must have the same length as texts")
        # 2. Embed the texts and generate ids
        embeddings = self._embedding.embed_documents(texts)
        ids = [str(uuid.uuid4()) for _ in texts]
        # 3. Assemble a record for each text
for idx, text in enumerate(texts):
self.store[ids[idx]] = {
"id": ids[idx],
"text": text,
"vector": embeddings[idx],
"metadata": metadatas[idx] if metadatas is not None else {},
}
return ids
    def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """Run a similarity search for the given query."""
        # 1. Embed the query
        embedding = self._embedding.embed_query(query)
        # 2. Compare the query against every stored vector using Euclidean distance
        result = []
        for key, record in self.store.items():
            distance = self._euclidean_distance(embedding, record["vector"])
            result.append({"distance": distance, **record})
        # 3. Sort ascending: a smaller Euclidean distance means a closer match
        sorted_result = sorted(result, key=lambda x: x["distance"])
        # 4. Keep only the top k records
        result_k = sorted_result[:k]
return [
Document(page_content=item["text"], metadata={**item["metadata"], "score": item["distance"]})
for item in result_k
]
    @classmethod
    def from_texts(cls: Type["MemoryVectorStore"], texts: List[str], embedding: Embeddings,
                   metadatas: Optional[List[dict]] = None,
                   **kwargs: Any) -> "MemoryVectorStore":
        """Build a vector store from the given texts and metadata."""
memory_vector_store = cls(embedding=embedding)
memory_vector_store.add_texts(texts, metadatas, **kwargs)
return memory_vector_store
    @classmethod
    def _euclidean_distance(cls, vec1: list, vec2: list) -> float:
        """Compute the Euclidean distance between two vectors."""
return np.linalg.norm(np.array(vec1) - np.array(vec2))
dotenv.load_dotenv()
# 1. Create the sample data and the embedding model
texts = [
"笨笨是一只很喜欢睡觉的猫咪",
"我喜欢在夜晚听音乐,这让我感到放松。",
"猫咪在窗台上打盹,看起来非常可爱。",
"学习新技能是每个人都应该追求的目标。",
"我最喜欢的食物是意大利面,尤其是番茄酱的那种。",
"昨晚我做了一个奇怪的梦,梦见自己在太空飞行。",
"我的手机突然关机了,让我有些焦虑。",
"阅读是我每天都会做的事情,我觉得很充实。",
"他们一起计划了一次周末的野餐,希望天气能好。",
"我的狗喜欢追逐球,看起来非常开心。",
]
metadatas = [
{"page": 1},
{"page": 2},
{"page": 3},
{"page": 4},
{"page": 5},
{"page": 6, "account_id": 1},
{"page": 7},
{"page": 8},
{"page": 9},
{"page": 10},
]
embedding = OpenAIEmbeddings(model="text-embedding-3-small")
# 2. Build the custom vector store
db = MemoryVectorStore(embedding=embedding)
ids = db.add_texts(texts, metadatas)
print(ids)
# 3. Run a retrieval
print(db.similarity_search("笨笨是谁?"))
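Because from_texts is implemented, the same store can also be built in a single call. A short usage sketch reusing the texts, metadatas, and embedding defined above; note that since store is a class-level dict, every MemoryVectorStore instance shares the same records:
python
# One-step construction via the from_texts classmethod; the records land in
# the shared class-level store dict.
db2 = MemoryVectorStore.from_texts(texts, embedding, metadatas)
print(db2.similarity_search("笨笨是谁?"))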
9. Using the Document Component and Document Loaders
1. Document and TextLoader
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/1 15:27
@Author : thezehui@gmail.com
@File : 1.Document与TextLoader.py
"""
from langchain_community.document_loaders import TextLoader
# 1. Build the loader
loader = TextLoader("./电商产品数据.txt", encoding="utf-8")
# 2. Load the data
documents = loader.load()
print(documents)
print(len(documents))
print(documents[0].metadata)
10. Tips for LangChain's Built-in Document Loaders
1. Markdown document loader
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/1 18:35
@Author : thezehui@gmail.com
@File : 1.Markdown文档加载器.py
"""
from langchain_community.document_loaders import UnstructuredMarkdownLoader
loader = UnstructuredMarkdownLoader("./项目API资料.md")
documents = loader.load()
print(documents)
print(len(documents))
print(documents[0].metadata)
2. Office document loader
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/1 18:46
@Author : thezehui@gmail.com
@File : 2.Office文档加载器.py
"""
from langchain_community.document_loaders import (
UnstructuredPowerPointLoader,
)
# excel_loader = UnstructuredExcelLoader("./员工考勤表.xlsx", mode="elements")
# excel_documents = excel_loader.load()
# word_loader = UnstructuredWordDocumentLoader("./喵喵.docx")
# documents = word_loader.load()
ppt_loader = UnstructuredPowerPointLoader("./章节介绍.pptx")
documents = ppt_loader.load()
print(documents)
print(len(documents))
print(documents[0].metadata)
3. Web page (URL) loader
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/1 23:17
@Author : thezehui@gmail.com
@File : 3.URL网页加载器.py
"""
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://imooc.com")
documents = loader.load()
print(documents)
print(len(documents))
print(documents[0].metadata)
4. Generic file loader
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/1 23:30
@Author : thezehui@gmail.com
@File : 4.通用文件加载器.py
"""
from langchain_community.document_loaders import UnstructuredFileLoader
loader = UnstructuredFileLoader("./项目API资料.md")
documents = loader.load()
print(documents)
print(len(documents))
print(documents[0].metadata)
5. Project API reference (项目API资料.md)
md
# LLMOps 项目 API 文档
应用 API 接口统一以 JSON 格式返回,并且包含 3 个字段:`code`、`data` 和 `message`,分别代表`业务状态码`、`业务数据`和`接口附加信息`。
`业务状态码`共有 6 种,其中只有 `success(成功)` 代表业务操作成功,其他 5 种状态均代表失败,并且失败时会附加相关的信息:`fail(通用失败)`、`not_found(未找到)`、`unauthorized(未授权)`、`forbidden(无权限)`和`validate_error(数据验证失败)`。
接口示例:
```json
{
"code": "success",
"data": {
"redirect_url": "https://github.com/login/oauth/authorize?client_id=f69102c6b97d90d69768&redirect_uri=http%3A%2F%2Flocalhost%3A5001%2Foauth%2Fauthorize%2Fgithub&scope=user%3Aemail"
},
"message": ""
}
```
带有分页数据的接口会在 `data` 内固定传递 `list` 和 `paginator` 字段,其中 `list` 代表分页后的列表数据,`paginator` 代表分页的数据。
`paginator` 内存在 4 个字段:`current_page(当前页数)` 、`page_size(每页数据条数)`、`total_page(总页数)`、`total_record(总记录条数)`,示例数据如下:
```json
{
"code": "success",
"data": {
"list": [
{
"app_count": 0,
"created_at": 1713105994,
"description": "这是专门用来存储慕课LLMOps课程信息的知识库",
"document_count": 13,
"icon": "https://imooc-llmops-1257184990.cos.ap-guangzhou.myqcloud.com/2024/04/07/96b5e270-c54a-4424-aece-ff8a2b7e4331.png",
"id": "c0759ca8-2d35-4480-83a8-1f41f29d1401",
"name": "慕课LLMOps课程知识库",
"updated_at": 1713106758,
"word_count": 8850
}
],
"paginator": {
"current_page": 1,
"page_size": 20,
"total_page": 1,
"total_record": 2
}
},
"message": ""
}
```
如果接口需要授权,需要在 `headers` 中添加 `Authorization` ,并附加 `access_token` 即可完成授权登录,示例:
```json
Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MTY0NTY3OTgsImlzcyI6ImxsbW9wcyIsInN1YiI6ImM5MDljMWRiLWIyMmUtNGZlNi04OGIyLWIyZTkxZWFiMWE3YiJ9.JDAtWDBBGiXa_XFihfopRe4Cz-RQ9_TAcno9w81tNbE
```
## 01. 应用模块
### 1.1 [todo]获取应用基础信息
- **接口说明**:传递对应的应用 id,获取当前应用的基础信息+配置信息等内容。
- **接口信息**:`授权`+`GET:/apps/:app_id`
- **接口参数**:
- 请求参数:
- `app_id -> uuid`:路由参数,必填,需要获取的应用 id。
- 响应参数:
- `id -> uuid`:应用 id,类型为 uuid。
- `name -> string`:应用名称。
- `icon -> string`:应用图标。
- `description -> string`:应用描述。
- `published_app_config_id -> uuid`:已发布应用配置 id,如果不存在则为 null。
- `drafted_app_config_id -> uuid`:草稿应用配置 id,如果不存在则为 null。
- `debug_conversation_id -> uuid`:调试会话记录 id,如果不存在则为 null。
- `published_app_config/drafted_app_config -> json`:应用配置信息,涵盖草稿配置、已发布配置,如果没有则为 null,两个配置的变量信息一致。
- `id -> uuid`:应用配置 id。
- `model_config -> json`:模型配置,类型为 json。
- `dialog_round -> int`:携带上下文轮数,类型为非负整型。
- `memory_mode -> string`:记忆类型,涵盖长记忆 `long_term_memory` 和 `none` 代表无。
- `status -> string`:应用配置的状态,`drafted` 代表草稿、`published` 代表已发布配置。
- `updated_at -> int`:应用配置的更新时间。
- `created_at -> int`:应用配置的创建时间。
- `updated_at -> int`:应用的更新时间。
- `created_at -> int`:应用的创建时间。
- **响应示例**:
```json
{
"code": "success",
"data": {
"id": "5e7834dc-bbca-4ee5-9591-8f297f5acded",
"name": "慕课LLMOps聊天机器人",
"icon": "https://imooc-llmops-1257184990.cos.ap-guangzhou.myqcloud.com/2024/04/23/e4422149-4cf7-41b3-ad55-ca8d2caa8f13.png",
"description": "这是一个慕课LLMOps的Agent应用",
"published_app_config_id": null,
"drafted_app_config_id": null,
"debug_conversation_id": "1550b71a-1444-47ed-a59d-c2f080fbae94",
"published_app_config": null,
"drafted_app_config": {
"id": "755dc464-67cd-42ef-9c56-b7528b44e7c8",
"model_config": {
"dialog_round": 3
},
"memory_mode": "long_term_memory",
"status": "draft",
"updated_at": 1714053834,
"created_at": 1714053834
},
"updated_at": 1714053834,
"created_at": 1714053834
},
"message": ""
}
```
### 1.2 [todo]更新应用草稿配置信息
- **接口说明**:更新应用的草稿配置信息,涵盖:模型配置、长记忆模式等,该接口会查找该应用原始的草稿配置并进行更新,如果没有原始草稿配置,则创建一个新配置作为草稿配置。
- **接口信息**:`授权`+`POST:/apps/:app_id/config`
- **接口参数**:
- 请求参数:
- `app_id -> str`:需要修改配置的应用 id。
- `model_config -> json`:模型配置信息。
- `dialog_round -> int`:携带上下文轮数,类型为非负整型。
- `memory_mode -> string`:记忆类型,涵盖长记忆 `long_term_memory` 和 `none` 代表无。
- **请求示例**:
```json
{
"model_config": {
"dialog_round": 10
},
"memory_mode": "long_term_memory"
}
```
- **响应示例**:
```json
{
"code": "success",
"data": {},
"message": "更新AI应用配置成功"
}
```
### 1.3 [todo]获取应用调试长记忆
- **接口说明**:用于获取指定应用的长记忆内容,如果该应用并没有开启长记忆,则会抛出错误信息。
- **接口信息**:`授权`+`GET:/apps/:app_id/long-term-memory`
- **接口参数**:
- 请求参数:
- `app_id -> str`:需要获取长记忆的应用 id。
- 响应参数:
- `summary -> str`:该应用最新调试会话的长记忆内容。
- **响应示例**:
```json
{
"code": "success",
"data": {
"summary": "人类自我介绍为慕小课,并要求人工智能解释LLM(大型语言模型)的概念。人工智能将LLM描述为一种基于深度学习的模型,通常建立在Transformer架构上,用于自然语言处理任务。LLM经历了一个预训练阶段,在那里他们从大量的文本数据中学习语言结构,比如维基百科的文章和书籍。它们利用自我注意机制来有效地处理长程依赖关系。经过预训练后,LLM可以针对特定的应用程序进行微调,使其功能适应文本生成、理解和分类等任务。LLM由于其多功能性和强大的语言理解和生成能力,被广泛应用于虚拟助理、翻译、情绪分析、医疗保健、金融等领域,代表了自然语言处理的前沿技术。"
},
"message": ""
}
```
### 1.4 [todo]更新应用调试长记忆
- **接口说明**:用于更新对应应用的调试长记忆内容,如果应用没有开启长记忆功能,则调用接口会发生报错。
- **接口信息**:`授权`+`POST:/apps/:app_id/long-term-memory`
- **接口参数**:
- 请求参数:
- `app_id -> str`:路由参数,需要更新长记忆的应用 id。
- `summary -> str`:需要更新的长记忆内容。
- **请求示例**:
```json
{
"summary": "人类介绍自己叫慕小课,喜欢打篮球。"
}
```
- **响应示例**:
```json
{
"code": "success",
"data": {},
"message": "更新AI应用长记忆成功"
}
```
### 1.5 [todo]应用调试对话
- **接口说明**:用于在编排 AI 应用时进行 debug 调试,如果当前应用没有草稿配置,则使用发布配置进行调试,如果有草稿配置则以草稿配置信息进行调试。
- **接口信息**:`授权`+`POST:/apps/:app_id/debug`
- **接口参数**:
- 请求参数:
- `app_id -> str`:路由参数,需要调试的 AI 应用 id,格式为 uuid。
- `query -> str`:用户发起的提问信息。
- 响应参数:
- `id -> uuid`:响应消息的 id,类型为 uuid。
- `conversation_id -> uuid`:消息关联会话的 id,类型为 uuid。
- `query -> str`:人类的输入字符串。
- `answer -> str`:AI 的生成内容。
- `answer_tokens -> int`:生成内容消耗的 Token 数。
- `response_latency -> float`:响应消耗的时间,单位为毫秒。
- `updated_at -> int`:消息的更新时间。
- `created_at -> int`:消息的创建时间。
- **请求示例**:
```json
{
"query": "能详细讲解下LLM是什么吗?"
}
```
- **响应示例**:
```json
{
"code": "success",
"data": {
"id": "1550b71a-1444-47ed-a59d-c2f080fbae94",
"conversation_id": "2d7d3e3f-95c9-4d9d-ba9c-9daaf09cc8a8",
"query": "能详细讲解下LLM是什么吗?",
"answer": "LLM 即 Large Language Model,大语言模型,是一种基于深度学习的自然语言处理模型,具有很高的语言理解和生成能力,能够处理各式各样的自然语言任务,例如文本生成、问答、翻译、摘要等。它通过在大量的文本数据上进行训练,学习到语言的模式、结构和语义知识。",
"answer_tokens": 1454,
"response_latency": 8541,
"updated_at": 1714053834,
"created_at": 1714053834
},
"message": ""
}
```
### 1.6 [todo]获取应用调试历史对话列表
- **接口说明**:用于获取应用调试历史对话列表信息,该接口支持分页,单次最多返回 20 组对话消息,并且分页以时间字段进行降序,接口不会返回软删除对应的数据。
- **接口信息**:`授权`+`GET:/apps/:app_id/messages`
- **接口参数**:
- 请求参数:
- `app_id -> str`:路由参数,需要调试的 AI 应用 id,格式为 uuid。
- 响应参数:
- `id -> uuid`:响应消息的 id,类型为 uuid。
- `conversation_id -> uuid`:消息关联会话的 id,类型为 uuid。
- `query -> str`:人类的输入字符串。
- `answer -> str`:AI 的生成内容。
- `answer_tokens -> int`:生成内容消耗的 Token 数。
- `response_latency -> float`:响应消耗的时间,单位为毫秒。
- `updated_at -> int`:消息的更新时间。
- `created_at -> int`:消息的创建时间。
- **响应示例**:
```json
{
"code": "success",
"data": {
"list": [
{
"id": "1550b71a-1444-47ed-a59d-c2f080fbae94",
"conversation_id": "2d7d3e3f-95c9-4d9d-ba9c-9daaf09cc8a8",
"query": "能详细讲解下LLM是什么吗?",
"answer": "LLM 即 Large Language Model,大语言模型,是一种基于深度学习的自然语言处理模型,具有很高的语言理解和生成能力,能够处理各式各样的自然语言任务,例如文本生成、问答、翻译、摘要等。它通过在大量的文本数据上进行训练,学习到语言的模式、结构和语义知识。",
"answer_tokens": 1454,
"response_latency": 8541,
"updated_at": 1714053834,
"created_at": 1714053834
}
],
"paginator": {
"current_page": 1,
"page_size": 20,
"total_page": 1,
"total_record": 2
}
},
"message": ""
}
```
### 1.7 [todo]删除特定的调试消息
- **接口说明**:用于删除 AI 应用调试对话过程中指定的消息,该删除会在后端执行软删除操作,并且只有当会话 id 和消息 id 都匹配上时,才会删除对应的调试消息。
- **接口信息**:`授权`+`POST:/apps/:app_id/messages/:message_id/delete`
- **接口参数**:
- 请求参数:
- `app_id -> uuid`:路由参数,需要删除消息归属的应用 id,格式为 uuid。
- `message_id -> uuid`:路由参数,需要删除的消息 id,格式为 uuid。
- **请求示例**:
```json
{
"app_id": "1550b71a-1444-47ed-a59d-c2f080fbae94",
"message_id": "2d7d3e3f-95c9-4d9d-ba9c-9daaf09cc8a8"
}
```
- **响应示例**:
```json
{
"code": "success",
"data": {},
"message": "删除调试信息成功"
}
```
11. Tips for Custom LangChain Document Loaders
1. Custom loader example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 8:32
@Author : thezehui@gmail.com
@File : 1.自定义加载器使用技巧.py
"""
from typing import Iterator, AsyncIterator
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
class CustomDocumentLoader(BaseLoader):
    """Custom document loader that parses each line of a text file into a Document."""
def __init__(self, file_path: str) -> None:
self.file_path = file_path
def lazy_load(self) -> Iterator[Document]:
        # 1. Open the target file
with open(self.file_path, encoding="utf-8") as f:
line_number = 0
            # 2. Iterate over the file line by line
for line in f:
                # 3. Yield a Document instance for each line
yield Document(
page_content=line,
                    metadata={"source": self.file_path, "line_number": line_number}
)
line_number += 1
async def alazy_load(self) -> AsyncIterator[Document]:
import aiofiles
async with aiofiles.open(self.file_path, encoding="utf-8") as f:
line_number = 0
async for line in f:
yield Document(
page_content=line,
                    metadata={"source": self.file_path, "line_number": line_number}
)
line_number += 1
loader = CustomDocumentLoader("./喵喵.txt")
documents = loader.load()
print(documents)
print(len(documents))
print(documents[0].metadata)
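load() materializes every Document at once; the point of implementing lazy_load is that large files can be streamed instead. A short usage sketch:
python
# Stream Documents one at a time; only the current line is held in memory.
for document in loader.lazy_load():
    print(document.metadata)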
12. Blob and BlobParser as an Alternative to Document Loaders
1. Blob parser example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 10:51
@Author : thezehui@gmail.com
@File : 1.Blob解析器示例.py
"""
from typing import Iterator
from langchain_core.document_loaders import Blob
from langchain_core.document_loaders.base import BaseBlobParser
from langchain_core.documents import Document
class CustomParser(BaseBlobParser):
    """Custom parser that turns each line of a binary blob into a Document."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
line_number = 0
with blob.as_bytes_io() as f:
for line in f:
yield Document(
page_content=line,
metadata={"source": blob.source, "line_number": line_number}
)
line_number += 1
# 1. Load the blob data
blob = Blob.from_path("./喵喵.txt")
parser = CustomParser()
# 2. Parse the blob into documents
documents = list(parser.lazy_parse(blob))
# 3. Print the results
print(documents)
print(len(documents))
print(documents[0].metadata)
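Besides lazy_parse, BaseBlobParser also provides an eager helper, so step 2 above could equivalently be written as:
python
# parse() collects the lazy_parse() generator into a list in one call.
documents = parser.parse(blob)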
2. FileSystemBlobLoader example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 11:10
@Author : thezehui@gmail.com
@File : 2.FileSystemBlobLoader示例.py
"""
from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader
loader = FileSystemBlobLoader(".", show_progress=True)
for blob in loader.yield_blobs():
print(blob.as_string())
3. GenericLoader example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 11:16
@Author : thezehui@gmail.com
@File : 3.GenericLoader示例.py
"""
from langchain_community.document_loaders.generic import GenericLoader
loader = GenericLoader.from_filesystem(".", glob="*.txt", show_progress=True)
for idx, doc in enumerate(loader.lazy_load()):
    print(f"Loading file #{idx}, source: {doc.metadata['source']}")
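GenericLoader can also be assembled manually from a blob loader plus a blob parser, which ties this section together with the earlier examples. A sketch, assuming the CustomParser class from the Blob parser example is available in scope:
python
from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader
from langchain_community.document_loaders.generic import GenericLoader
# Hypothetical combination: feed every .txt blob on disk through the
# line-based CustomParser defined in the Blob parser example.
loader = GenericLoader(
    blob_loader=FileSystemBlobLoader(".", glob="*.txt"),
    blob_parser=CustomParser(),
)
documents = loader.load()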
13. Using Document Transformers and Text Splitters
1. Character text splitter example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 13:16
@Author : thezehui@gmail.com
@File : 1.字符分割器使用示例.py
"""
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_text_splitters import CharacterTextSplitter
# 1. Load the document
loader = UnstructuredMarkdownLoader("./项目API文档.md")
documents = loader.load()
# 2. Create the text splitter
text_splitter = CharacterTextSplitter(
separator="\n\n",
chunk_size=500,
chunk_overlap=50,
add_start_index=True,
)
# 3. Split the text
chunks = text_splitter.split_documents(documents)
for chunk in chunks:
    print(f"Chunk size: {len(chunk.page_content)}, metadata: {chunk.metadata}")
print(len(chunks))
2. 项目API文档.md — identical in content to the 项目API资料.md file reproduced in section 10.5, so it is not repeated here.
14. The Recursive Character Text Splitter: Usage and Inner Workings
1. Recursive character text splitter example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 14:39
@Author : thezehui@gmail.com
@File : 1.递归字符文本分割器示例.py
"""
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = UnstructuredMarkdownLoader("./项目API文档.md")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=50,
add_start_index=True,
)
chunks = text_splitter.split_documents(documents)
for chunk in chunks:
    print(f"Chunk size: {len(chunk.page_content)}, metadata: {chunk.metadata}")
2. Recursively splitting program code
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 8:21
@Author : thezehui@gmail.com
@File : 2.程序代码递归分割示例.py
"""
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter, Language
loader = UnstructuredFileLoader("./demo.py")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON,
chunk_size=500,
chunk_overlap=50,
add_start_index=True,
)
chunks = text_splitter.split_documents(documents)
for chunk in chunks:
    print(f"Chunk size: {len(chunk.page_content)}, metadata: {chunk.metadata}")
print(chunks[2].page_content)
3. Splitting mixed Chinese and English text
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 10:53
@Author : thezehui@gmail.com
@File : 3.分割中英文场景示例.py
"""
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
# 1. Create the loader and the text splitter
loader = UnstructuredMarkdownLoader("./项目API文档.md")
separators = [
    "\n\n",
    "\n",
    "。|!|?",
    r"\.\s|\!\s|\?\s",  # English sentence punctuation is usually followed by a space
    r";|;\s",
    r",|,\s",
    " ",
    ""
]
text_splitter = RecursiveCharacterTextSplitter(
separators=separators,
is_separator_regex=True,
chunk_size=500,
chunk_overlap=50,
add_start_index=True,
)
# 2. Load the document and split it
documents = loader.load()
chunks = text_splitter.split_documents(documents)
for chunk in chunks:
    print(f"Chunk size: {len(chunk.page_content)}, metadata: {chunk.metadata}")
print(chunks[2].page_content)
4. demo.py
python
from __future__ import annotations
import re
from typing import Any, List, Optional
from langchain_text_splitters.base import Language, TextSplitter
class CharacterTextSplitter(TextSplitter):
"""Splitting text that looks at characters."""
def __init__(
self, separator: str = "\n\n", is_separator_regex: bool = False, **kwargs: Any
) -> None:
"""Create a new TextSplitter."""
super().__init__(**kwargs)
self._separator = separator
self._is_separator_regex = is_separator_regex
def split_text(self, text: str) -> List[str]:
"""Split incoming text and return chunks."""
# First we naively split the large input into a bunch of smaller ones.
separator = (
self._separator if self._is_separator_regex else re.escape(self._separator)
)
splits = _split_text_with_regex(text, separator, self._keep_separator)
_separator = "" if self._keep_separator else self._separator
return self._merge_splits(splits, _separator)
def _split_text_with_regex(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = [_splits[i] + _splits[i + 1] for i in range(1, len(_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
class RecursiveCharacterTextSplitter(TextSplitter):
    """Splitting text by recursively looking at characters.
    Recursively tries to split by different characters to find one
    that works.
    """
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = False,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or ["\n\n", "\n", " ", ""]
self._is_separator_regex = is_separator_regex
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = _split_text_with_regex(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
def split_text(self, text: str) -> List[str]:
return self._split_text(text, self._separators)
@classmethod
def from_language(
cls, language: Language, **kwargs: Any
) -> RecursiveCharacterTextSplitter:
separators = cls.get_separators_for_language(language)
return cls(separators=separators, is_separator_regex=True, **kwargs)
@staticmethod
def get_separators_for_language(language: Language) -> List[str]:
if language == Language.CPP:
return [
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nvoid ",
"\nint ",
"\nfloat ",
"\ndouble ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.GO:
return [
# Split along function definitions
"\nfunc ",
"\nvar ",
"\nconst ",
"\ntype ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JAVA:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.KOTLIN:
return [
# Split along class definitions
"\nclass ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\ninternal ",
"\ncompanion ",
"\nfun ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nwhen ",
"\ncase ",
"\nelse ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.JS:
return [
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.TS:
return [
"\nenum ",
"\ninterface ",
"\nnamespace ",
"\ntype ",
# Split along class definitions
"\nclass ",
# Split along function definitions
"\nfunction ",
"\nconst ",
"\nlet ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nswitch ",
"\ncase ",
"\ndefault ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PHP:
return [
# Split along function definitions
"\nfunction ",
# Split along class definitions
"\nclass ",
# Split along control flow statements
"\nif ",
"\nforeach ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PROTO:
return [
# Split along message definitions
"\nmessage ",
# Split along service definitions
"\nservice ",
# Split along enum definitions
"\nenum ",
# Split along option definitions
"\noption ",
# Split along import statements
"\nimport ",
# Split along syntax declarations
"\nsyntax ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.PYTHON:
return [
# First, try to split along class definitions
"\nclass ",
"\ndef ",
"\n\tdef ",
# Now split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RST:
return [
# Split along section titles
"\n=+\n",
"\n-+\n",
"\n\\*+\n",
# Split along directive markers
"\n\n.. *\n\n",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUBY:
return [
# Split along method definitions
"\ndef ",
"\nclass ",
# Split along control flow statements
"\nif ",
"\nunless ",
"\nwhile ",
"\nfor ",
"\ndo ",
"\nbegin ",
"\nrescue ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.RUST:
return [
# Split along function definitions
"\nfn ",
"\nconst ",
"\nlet ",
# Split along control flow statements
"\nif ",
"\nwhile ",
"\nfor ",
"\nloop ",
"\nmatch ",
"\nconst ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SCALA:
return [
# Split along class definitions
"\nclass ",
"\nobject ",
# Split along method definitions
"\ndef ",
"\nval ",
"\nvar ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nmatch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SWIFT:
return [
# Split along function definitions
"\nfunc ",
# Split along class definitions
"\nclass ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo ",
"\nswitch ",
"\ncase ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.MARKDOWN:
return [
# First, try to split along Markdown headings (starting with level 2)
"\n#{1,6} ",
# Note the alternative syntax for headings (below) is not handled here
# Heading level 2
# ---------------
# End of code block
"```\n",
# Horizontal lines
"\n\\*\\*\\*+\n",
"\n---+\n",
"\n___+\n",
# Note that this splitter doesn't handle horizontal lines defined
# by *three or more* of ***, ---, or ___, but this is not handled
"\n\n",
"\n",
" ",
"",
]
elif language == Language.LATEX:
return [
# First, try to split along Latex sections
"\n\\\\chapter{",
"\n\\\\section{",
"\n\\\\subsection{",
"\n\\\\subsubsection{",
# Now split by environments
"\n\\\\begin{enumerate}",
"\n\\\\begin{itemize}",
"\n\\\\begin{description}",
"\n\\\\begin{list}",
"\n\\\\begin{quote}",
"\n\\\\begin{quotation}",
"\n\\\\begin{verse}",
"\n\\\\begin{verbatim}",
            # Now split by math environments
            "\n\\\\begin{align}",
"$$",
"$",
# Now split by the normal type of lines
" ",
"",
]
elif language == Language.HTML:
return [
# First, try to split along HTML tags
"<body",
"<div",
"<p",
"<br",
"<li",
"<h1",
"<h2",
"<h3",
"<h4",
"<h5",
"<h6",
"<span",
"<table",
"<tr",
"<td",
"<th",
"<ul",
"<ol",
"<header",
"<footer",
"<nav",
# Head
"<head",
"<style",
"<script",
"<meta",
"<title",
"",
]
elif language == Language.CSHARP:
return [
"\ninterface ",
"\nenum ",
"\nimplements ",
"\ndelegate ",
"\nevent ",
# Split along class definitions
"\nclass ",
"\nabstract ",
# Split along method definitions
"\npublic ",
"\nprotected ",
"\nprivate ",
"\nstatic ",
"\nreturn ",
# Split along control flow statements
"\nif ",
"\ncontinue ",
"\nfor ",
"\nforeach ",
"\nwhile ",
"\nswitch ",
"\nbreak ",
"\ncase ",
"\nelse ",
# Split by exceptions
"\ntry ",
"\nthrow ",
"\nfinally ",
"\ncatch ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.SOL:
return [
# Split along compiler information definitions
"\npragma ",
"\nusing ",
# Split along contract definitions
"\ncontract ",
"\ninterface ",
"\nlibrary ",
# Split along method definitions
"\nconstructor ",
"\ntype ",
"\nfunction ",
"\nevent ",
"\nmodifier ",
"\nerror ",
"\nstruct ",
"\nenum ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\ndo while ",
"\nassembly ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.COBOL:
return [
# Split along divisions
"\nIDENTIFICATION DIVISION.",
"\nENVIRONMENT DIVISION.",
"\nDATA DIVISION.",
"\nPROCEDURE DIVISION.",
# Split along sections within DATA DIVISION
"\nWORKING-STORAGE SECTION.",
"\nLINKAGE SECTION.",
"\nFILE SECTION.",
# Split along sections within PROCEDURE DIVISION
"\nINPUT-OUTPUT SECTION.",
# Split along paragraphs and common statements
"\nOPEN ",
"\nCLOSE ",
"\nREAD ",
"\nWRITE ",
"\nIF ",
"\nELSE ",
"\nMOVE ",
"\nPERFORM ",
"\nUNTIL ",
"\nVARYING ",
"\nACCEPT ",
"\nDISPLAY ",
"\nSTOP RUN.",
# Split by the normal type of lines
"\n",
" ",
"",
]
elif language == Language.LUA:
return [
# Split along variable and table definitions
"\nlocal ",
# Split along function definitions
"\nfunction ",
# Split along control flow statements
"\nif ",
"\nfor ",
"\nwhile ",
"\nrepeat ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
elif language == Language.HASKELL:
return [
# Split along function definitions
"\nmain :: ",
"\nmain = ",
"\nlet ",
"\nin ",
"\ndo ",
"\nwhere ",
"\n:: ",
"\n= ",
# Split along type declarations
"\ndata ",
"\nnewtype ",
"\ntype ",
"\n:: ",
# Split along module declarations
"\nmodule ",
# Split along import statements
"\nimport ",
"\nqualified ",
"\nimport qualified ",
# Split along typeclass declarations
"\nclass ",
"\ninstance ",
# Split along case expressions
"\ncase ",
# Split along guards in function definitions
"\n| ",
# Split along record field declarations
"\ndata ",
"\n= {",
"\n, ",
# Split by the normal type of lines
"\n\n",
"\n",
" ",
"",
]
else:
raise ValueError(
f"Language {language} is not supported! "
f"Please choose from {list(Language)}"
)
5. 项目API文档.md — identical in content to the 项目API资料.md file reproduced in section 10.5, so it is not repeated here.
15. Semantic and Other Specialized Document Splitters
1. Semantic chunker example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 14:33
@Author : thezehui@gmail.com
@File : 1.语义分割器使用示例.py
"""
import dotenv
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai import OpenAIEmbeddings
dotenv.load_dotenv()
# 1. Build the loader and the text splitter
loader = UnstructuredFileLoader("./科幻短篇.txt")
text_splitter = SemanticChunker(
embeddings=OpenAIEmbeddings(model="text-embedding-3-small"),
number_of_chunks=10,
add_start_index=True,
sentence_split_regex=r"(?<=[。?!.?!])"
)
# 2. Load the text and split it
documents = loader.load()
chunks = text_splitter.split_documents(documents)
# 3. Print each chunk
for chunk in chunks:
    print(f"Chunk size: {len(chunk.page_content)}, metadata: {chunk.metadata}")
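Instead of forcing a fixed number_of_chunks, SemanticChunker can also pick breakpoints statistically. A variant sketch using the percentile strategy (the parameter values shown are illustrative, not from the original file):
python
# Split wherever the embedding distance between consecutive sentences
# exceeds the 95th percentile of all distances in the document.
text_splitter = SemanticChunker(
    embeddings=OpenAIEmbeddings(model="text-embedding-3-small"),
    breakpoint_threshold_type="percentile",
    breakpoint_threshold_amount=95.0,
)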
2. Other document splitter usage example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 14:55
@Author : thezehui@gmail.com
@File : 2.其他文档分割器使用示例.py
"""
from langchain_text_splitters import HTMLHeaderTextSplitter
# 1. Build the sample HTML and the headers to split on
html_string = """
<!DOCTYPE html>
<html>
<body>
<div>
<h1>标题1</h1>
<p>关于标题1的一些介绍文本。</p>
<div>
<h2>子标题1</h2>
<p>关于子标题1的一些介绍文本。</p>
<h3>子子标题1</h3>
<p>关于子子标题1的一些文本。</p>
<h3>子子标题2</h3>
<p>关于子子标题2的一些文本。</p>
</div>
<div>
<h2>子标题2</h2>
<p>关于子标题2的一些文本。</p>
</div>
<br>
<p>关于标题1的一些结束文本。</p>
</div>
</body>
</html>
"""
headers_to_split_on = [
("h1", "一级标题"),
("h2", "二级标题"),
("h3", "三级标题"),
]
# 2. Create the splitter and split the text
text_splitter = HTMLHeaderTextSplitter(headers_to_split_on)
chunks = text_splitter.split_text(html_string)
# 3. Print the split chunks
for chunk in chunks:
print(chunk)
3. Recursive JSON splitter example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 21:56
@Author : thezehui@gmail.com
@File : 3.递归JSON分割器示例.py
"""
import json
import requests
from langchain_text_splitters import RecursiveJsonSplitter
# 1. Fetch and load the JSON
url = "https://api.smith.langchain.com/openapi.json"
json_data = requests.get(url).json()
print(len(json.dumps(json_data)))
# 2. Create the recursive JSON splitter
text_splitter = RecursiveJsonSplitter(max_chunk_size=300)
# 3. Split the JSON data and create documents
json_chunks = text_splitter.split_json(json_data)
chunks = text_splitter.create_documents(json_chunks)
# 4. Print the total chunk size
count = 0
for chunk in chunks:
count += len(chunk.page_content)
print(count)
4. Token-based splitter
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 22:13
@Author : thezehui@gmail.com
@File : 4.基于标记的分割器.py
"""
import tiktoken
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
def calculate_token_count(query: str) -> int:
    """Count the number of tokens in the given text."""
encoding = tiktoken.encoding_for_model("text-embedding-3-large")
return len(encoding.encode(query))
# 1. Define the loader and the text splitter
loader = UnstructuredFileLoader("./科幻短篇.txt")
text_splitter = RecursiveCharacterTextSplitter(
    separators=[
        "\n\n",
        "\n",
        "。|!|?",
        r"\.\s|\!\s|\?\s",  # English sentence punctuation is usually followed by a space
        r";|;\s",
        r",|,\s",
        " ",
        ""
    ],
is_separator_regex=True,
chunk_size=500,
chunk_overlap=50,
length_function=calculate_token_count,
)
# 2. Load the document and split it
documents = loader.load()
chunks = text_splitter.split_documents(documents)
# 3. Print each chunk
for chunk in chunks:
    print(f"Chunk size: {len(chunk.page_content)}, metadata: {chunk.metadata}")
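LangChain also ships a convenience constructor that wires tiktoken up internally; a roughly equivalent splitter to the one above could be built like this (a sketch, with the custom separators omitted for brevity):
python
# Built-in helper that measures chunk length in tiktoken tokens, playing the
# same role as the calculate_token_count length_function above.
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
    model_name="text-embedding-3-large",
    chunk_size=500,
    chunk_overlap=50,
)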
16. Tips for Custom LangChain Text Splitters
1. Custom splitter example
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/7/2 22:38
@Author : thezehui@gmail.com
@File : 1.自定义分割器示例.py
"""
from typing import List
import jieba.analyse
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_text_splitters import TextSplitter
class CustomTextSplitter(TextSplitter):
    """Custom text splitter."""
    def __init__(self, separator: str, top_k: int = 10, **kwargs):
        """Takes the separator plus the number of keywords to extract per piece (default 10)."""
        super().__init__(**kwargs)
        self._separator = separator
        self._top_k = top_k
    def split_text(self, text: str) -> List[str]:
        """Split the text, extract keywords from each piece, and return them as a list."""
        # 1. Split the incoming text on the configured separator
        split_texts = text.split(self._separator)
        # 2. Extract the top self._top_k keywords from each piece
        text_keywords = []
        for piece in split_texts:
            text_keywords.append(jieba.analyse.extract_tags(piece, self._top_k))
        # 3. Join each keyword list with commas and return the list of strings
        return [",".join(keywords) for keywords in text_keywords]
# 1. Create the loader and the splitter
loader = UnstructuredFileLoader("./科幻短篇.txt")
text_splitter = CustomTextSplitter("\n\n", 10)
# 2. Load the document and split it
documents = loader.load()
chunks = text_splitter.split_documents(documents)
# 3. Iterate over the chunks
for chunk in chunks:
print(chunk.page_content)