提示词工程(Prompt Engineering)在当下大模型时代尤为重要,写好提示词的关键在于:立角色、述问题、定目标、补要求。
提示词模板
该部分介绍了langchain中常用的提示词模板:
- string prompt
代码模板:
python
from langchain_core.prompts import PromptTemplate


def init_string_prompt(name: str, count: int, county: str, sex: str) -> str:
    """Render a name-generation prompt from a single string template.

    All four arguments are substituted into the template and the fully
    formatted prompt string is returned.
    """
    template = PromptTemplate.from_template(
        "你是一个{name},帮我起{count}个具有{county}特色的{sex}名字"
    )
    return template.format(name=name, count=count, county=county, sex=sex)
- chat prompt
第一种代码模板:
python
from langchain_core.prompts import ChatPromptTemplate


def init_chat_prompt(name, user_input):
    """Build and render a few-shot chat history for a naming assistant.

    ``name`` fills the assistant's identity in the scripted turns and
    ``user_input`` becomes the final human message; returns the rendered
    message list.
    """
    scripted_turns = [
        ("system", "你是一个起名大师. 你的名字叫{name}."),
        ("human", "你好{name},你感觉如何?"),
        ("ai", "你好!我状态非常好!"),
        ("human", "你叫什么名字呢?"),
        ("ai", "你好!我叫{name}"),
        ("human", "{user_input}"),
    ]
    template = ChatPromptTemplate.from_messages(scripted_turns)
    return template.format_messages(name=name, user_input=user_input)
第二种代码模板:
python
from langchain_core.prompts import ChatMessagePromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage


def init_chat_prompt(subject):
    """Build a short scripted transcript ending with a custom-role message.

    The first three messages are fixed; the last one uses the non-standard
    role "天行者" with ``subject`` substituted into its template.
    """
    system_msg = SystemMessage(
        content="你是一个起名大师",
        additional_kwargs={"大师姓名": "陈瞎子"},
    )
    human_msg = HumanMessage(content="请问大师叫什么?")
    ai_msg = AIMessage(content="我叫陈瞎子")

    # Custom-role message rendered from a template string.
    role_template = ChatMessagePromptTemplate.from_template(
        role="天行者", template="愿{subject}与你同在!"
    )
    rendered = role_template.format(subject=subject)
    return [system_msg, human_msg, ai_msg, rendered]
- file prompt
代码模板:
python
from langchain_core.prompts import load_prompt


def init_file_prompt(file):
    """Load a serialized prompt template (json/yaml) from *file*.

    Returns the deserialized PromptTemplate; callers format it themselves.
    """
    # NOTE(review): the sample filename below hints at GBK-encoded content,
    # while load_prompt reads UTF-8 by default — confirm the file's actual
    # encoding (newer langchain_core accepts an ``encoding=`` argument).
    return load_prompt(file)


# Fixed: dropped the pointless f-string prefix — the literal has no
# placeholders, so ``f"..."`` was misleading noise (ruff F541).
init_file_prompt("gbk_simple_prompt.json").format(name="tom", what="happy")
文件模板:
json
{
"_type": "prompt",
"input_variables": ["name", "what"],
"template": "tell me a {what} story about {name}"
}
- custom prompt
代码模板:
python
from langchain_core.prompts import StringPromptTemplate
import inspect
PROMPT = """\
你是一个非常有经验和天赋的程序员,现在给你如下函数名称,你会按照如下格式,输出这段代码的名称、源代码、中文解释。
函数名称: {function_name}
源代码:
{source_code}
代码解释:
"""
def get_source_code(function_name):
return inspect.getsource(function_name)
class CreateCustomPrompt(StringPromptTemplate):
    """Prompt template that expands a function object into name + source code."""

    def format(self, **kwargs) -> str:
        """Render PROMPT for the function passed as ``function_name``.

        The argument must be a function object; its ``__name__`` and source
        (via get_source_code) are substituted into PROMPT.
        """
        func = kwargs["function_name"]
        return PROMPT.format(
            function_name=func.__name__,
            source_code=get_source_code(func),
        )
# test
def hello_world(abc):
    """Demo target function: print a greeting, then echo *abc* back."""
    print("Hello, world!")
    return abc
# Render the custom prompt for the hello_world function defined above.
a = CreateCustomPrompt(input_variables=["function_name"])
pm = a.format(function_name=hello_world)
from langchain_openai import OpenAI
import os
from dotenv import load_dotenv

# Pull the OpenAI credentials from the env file into the process env.
load_dotenv("/asset/openai.env")
api_key = os.getenv("OPENAI_API_KEY")
api_base = os.getenv("OPENAI_API_BASE")

# Completions model; temperature 0 keeps the explanation deterministic.
llm_config = {
    "model": "gpt-3.5-turbo-instruct",
    "temperature": 0,
    "openai_api_key": api_key,
    "openai_api_base": api_base,
}
llm = OpenAI(**llm_config)

# Send the rendered custom prompt (pm, built above) to the model.
llm.invoke(pm)
示例选择器
本部分介绍了常用的提示词示例组的筛选方案:
- 根据长度要求智能选择示例
核心逻辑:根据输入的提示词长度综合计算最终长度,智能截取或者添加提示词的示例
代码模板:
python
from langchain_core.prompts import PromptTemplate, FewShotPromptTemplate
from langchain_core.example_selectors import LengthBasedExampleSelector

# Antonym pairs used as few-shot demonstrations.
antonym_examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "sunny", "output": "gloomy"},
    {"input": "windy", "output": "calm"},
    {"input": "高兴", "output": "悲伤"},
]

# How one demonstration is rendered inside the final prompt.
pair_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="原词:{input}\n反义:{output}",
)

# Drops trailing examples once the formatted prompt would exceed
# max_length (as measured by the selector's default length function).
length_selector = LengthBasedExampleSelector(
    examples=antonym_examples,
    example_prompt=pair_prompt,
    max_length=25,
)

dynamic_prompt = FewShotPromptTemplate(
    example_selector=length_selector,
    example_prompt=pair_prompt,
    prefix="给出每个输入词的反义词",
    suffix="原词:{adjective}\n反义:",
    input_variables=["adjective"],
)

# Short input: all examples fit within the budget.
dynamic_prompt.format(adjective="big")

# Long input: the selector trims examples to respect max_length.
long_string = "big and huge adn massive and large and gigantic and tall and much much much much much much bigger then everyone"
dynamic_prompt.format(adjective=long_string)
- 根据输入相似度选择示例(最大边际相关性)
- MMR是一种在信息检索中常用的方法,它的目标是在相关性和多样性之间找到一个平衡
- MMR会首先找出与输入最相似(即余弦相似度最大)的样本
- 然后在迭代添加样本的过程中,对于与已选择样本过于接近(即相似度过高)的样本进行惩罚
- MMR既能确保选出的样本与输入高度相关,又能保证选出的样本之间有足够的多样性
- 关注如何在相关性和多样性之间找到一个平衡
代码模板:
python
from langchain_core.prompts import PromptTemplate, FewShotPromptTemplate
from langchain_core.example_selectors import MaxMarginalRelevanceExampleSelector
import os
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from dotenv import load_dotenv

# Load the OpenAI credentials from the env file.
load_dotenv("/asset/openai.env")
api_key = os.getenv("OPENAI_API_KEY")
api_base = os.getenv("OPENAI_API_BASE")

# Candidate antonym demonstrations.
examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "sunny", "output": "gloomy"},
    {"input": "windy", "output": "calm"},
    {"input": "高兴", "output": "悲伤"},
]

# Rendering for one demonstration.
demo_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="原词:{input}\n反义:{output}",
)

# MMR: balance similarity to the input against diversity among the picks.
mmr_selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples,
    # OpenAI embeddings drive the similarity search.
    OpenAIEmbeddings(openai_api_base=api_base, openai_api_key=api_key),
    # Vector store backend used to index the examples.
    FAISS,
    # Number of examples to select.
    k=2,
)

mmr_prompt = FewShotPromptTemplate(
    example_selector=mmr_selector,
    example_prompt=demo_prompt,
    prefix="给出每个输入词的反义词",
    suffix="原词:{adjective}\n反义:",
    input_variables=["adjective"],
)

mmr_prompt.format(adjective="难过")
- 根据输入相似度选择示例(最大余弦相似度)
- 一种常见的相似度计算方法
- 它通过计算两个向量(在这里,向量可以代表文本、句子或词语)之间的余弦值来衡量它们的相似度
- 余弦值越接近1,表示两个向量越相似
- 主要关注的是如何准确衡量两个向量的相似度
代码模板:
python
from langchain_core.prompts import PromptTemplate, FewShotPromptTemplate
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
import os
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from dotenv import load_dotenv

# Load the OpenAI credentials from the env file.
load_dotenv("/asset/openai.env")
api_key = os.getenv("OPENAI_API_KEY")
api_base = os.getenv("OPENAI_API_BASE")

# Candidate antonym demonstrations.
examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "sunny", "output": "gloomy"},
    {"input": "windy", "output": "calm"},
    {"input": "高兴", "output": "悲伤"},
]

# Rendering for one demonstration.
demo_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="原词:{input}\n反义:{output}",
)

# Pick the k examples whose embeddings are most similar to the input.
similarity_selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    # OpenAI embeddings drive the similarity search.
    OpenAIEmbeddings(openai_api_key=api_key, openai_api_base=api_base),
    # Chroma vector store indexes the embedded examples.
    Chroma,
    # Number of examples to select.
    k=1,
)

similar_prompt = FewShotPromptTemplate(
    example_selector=similarity_selector,
    example_prompt=demo_prompt,
    prefix="给出每个输入词的反义词",
    suffix="原词: {adjective}\n反义:",
    input_variables=["adjective"],
)

similar_prompt.format(adjective="worried")