LangChain: Models and Prompts

Get an OpenAI API key

#!pip install python-dotenv

#!pip install openai

import os

import openai

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())  # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
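load_dotenv(find_dotenv()) searches for a .env file starting from the current directory and walking up; a minimal .env for this notebook would just contain the key (placeholder value shown):

OPENAI_API_KEY=sk-your-key-here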
# account for deprecation of LLM model
import datetime

# Get the current date
current_date = datetime.datetime.now().date()

# Define the date after which the model should be set to "gpt-3.5-turbo"
target_date = datetime.date(2024, 6, 12)

# Set the model variable based on the current date
if current_date > target_date:
    llm_model = "gpt-3.5-turbo"
else:
    llm_model = "gpt-3.5-turbo-0301"

Chat API: OpenAI

Call the OpenAI API

def get_completion(prompt, model=llm_model):
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0, 
    )
    return response.choices[0].message["content"]

get_completion("What is 1+1?")


customer_email = """
Arrr, I be fuming that me blender lid \
flew off and splattered me kitchen walls \
with smoothie! And to make matters worse, \
the warranty don't cover the cost of \
cleaning up me kitchen. I need yer help \
right now, matey!
"""
style = """American English \
in a calm and respectful tone
"""



prompt = f"""Translate the text \
that is delimited by triple backticks 
into a style that is {style}.
text: ```{customer_email}```
"""
​
print(prompt)
response = get_completion(prompt)



response

Chat API: DeepSeek

import requests
import json

# DeepSeek API configuration
DEEPSEEK_API_KEY = "your-deepseek-api-key"  # replace with your actual API key
DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"

def get_deepseek_completion(prompt, model="deepseek-chat"):
    """使用 DeepSeek API 获取回复"""
    headers = {
        "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0
    }

    try:
        response = requests.post(DEEPSEEK_API_URL, headers=headers, json=payload)
        response.raise_for_status()  # raise on HTTP errors

        result = response.json()
        return result['choices'][0]['message']['content']
    except requests.exceptions.RequestException as e:
        print(f"API请求错误: {e}")
        return None
    except (KeyError, IndexError) as e:
        print(f"解析响应错误: {e}")
        return None

customer_email = """
Arrr, I be fuming that me blender lid \
flew off and splattered me kitchen walls \
with smoothie! And to make matters worse, \
the warranty don't cover the cost of \
cleaning up me kitchen. I need yer help \
right now, matey!
"""

style = """American English \
in a calm and respectful tone
"""

prompt = f"""Translate the text \
that is delimited by triple backticks 
into a style that is {style}.
text: ```{customer_email}```
"""

# Get and print the translated text
translation = get_deepseek_completion(prompt)
if translation:
    print("\nTranslation:")
    print(translation)

OpenAI approach: openai.ChatCompletion.create() returns the response object directly.
DeepSeek approach: requests.post() sends a plain HTTP request, and the JSON response is parsed manually.
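Because DeepSeek exposes an OpenAI-compatible endpoint (the LangChain example below relies on exactly that), the raw requests call is not strictly necessary: the same legacy openai package can be pointed at DeepSeek by overriding its base URL. A minimal sketch, assuming the pre-1.0 openai library used earlier and the placeholder key defined above:

import openai

# Point the legacy (pre-1.0) openai client at DeepSeek's OpenAI-compatible endpoint
openai.api_key = DEEPSEEK_API_KEY   # placeholder key defined above
openai.api_base = "https://api.deepseek.com/v1"

response = openai.ChatCompletion.create(
    model="deepseek-chat",
    messages=[{"role": "user", "content": "What is 1+1?"}],
    temperature=0,
)
print(response.choices[0].message["content"])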

LangChain + OpenAI

Let's see how we can do the same using LangChain.

#!pip install --upgrade langchain

from langchain.chat_models import ChatOpenAI



# To control the randomness and creativity of the generated
# text by an LLM, use temperature = 0.0
chat = ChatOpenAI(temperature=0.0, model=llm_model)

template_string = """Translate the text \
that is delimited by triple backticks \
into a style that is {style}. \
text: ```{text}```
"""



from langchain.prompts import ChatPromptTemplate

prompt_template = ChatPromptTemplate.from_template(template_string)
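Before filling the template in, it is worth checking that it picked up the two placeholders; with the legacy ChatPromptTemplate API used here you can inspect the underlying prompt:

# The template should report 'style' and 'text' as its input variables
print(prompt_template.messages[0].prompt)
print(prompt_template.messages[0].prompt.input_variables)  # expected: ['style', 'text']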



customer_style = """American English \
in a calm and respectful tone
"""


customer_email = """
Arrr, I be fuming that me blender lid \
flew off and splattered me kitchen walls \
with smoothie! And to make matters worse, \
the warranty don't cover the cost of \
cleaning up me kitchen. I need yer help \
right now, matey!
"""

customer_messages = prompt_template.format_messages(
                    style=customer_style,
                    text=customer_email)
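format_messages returns a plain Python list of chat messages (here a single HumanMessage), which is exactly what the chat model expects; a quick check:

print(type(customer_messages))      # list
print(type(customer_messages[0]))   # a HumanMessage
print(customer_messages[0])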



# Call the LLM to translate to the style of the customer message
customer_response = chat(customer_messages)
print(customer_response.content)



service_reply = """Hey there customer, \
the warranty does not cover \
cleaning expenses for your kitchen \
because it's your fault that \
you misused your blender \
by forgetting to put the lid on before \
starting the blender. \
Tough luck! See ya!
"""
service_style_pirate = """\
a polite tone \
that speaks in English Pirate\
"""



service_messages = prompt_template.format_messages(
    style=service_style_pirate,
    text=service_reply)

print(service_messages[0].content)
service_response = chat(service_messages)
print(service_response.content)

LangChain + DeepSeek

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate

# DeepSeek configuration
DEEPSEEK_API_KEY = "your-deepseek-api-key"  # replace with your actual API key
DEEPSEEK_API_BASE = "https://api.deepseek.com/v1"

# Create a chat model backed by DeepSeek
chat = ChatOpenAI(
    temperature=0.0,
    model="deepseek-chat",  # DeepSeek 模型名称
    openai_api_key=DEEPSEEK_API_KEY,
    openai_api_base=DEEPSEEK_API_BASE
)

# Translation template
template_string = """Translate the text \
that is delimited by triple backticks \
into a style that is {style}. \
text: ```{text}```
"""

# Create the prompt template
prompt_template = ChatPromptTemplate.from_template(template_string)

# Part 1: translate the customer email
customer_style = """American English in a calm and respectful tone"""
customer_email = """
Arrr, I be fuming that me blender lid \
flew off and splattered me kitchen walls \
with smoothie! And to make matters worse, \
the warranty don't cover the cost of \
cleaning up me kitchen. I need yer help \
right now, matey!
"""

# Format the messages
customer_messages = prompt_template.format_messages(
    style=customer_style,
    text=customer_email
)

# Call DeepSeek to translate
customer_response = chat(customer_messages)
print("Customer email translation:")
print(customer_response.content)
print("\n" + "-"*50 + "\n")

# Part 2: translate the service reply
service_reply = """Hey there customer, \
the warranty does not cover \
cleaning expenses for your kitchen \
because it's your fault that \
you misused your blender \
by forgetting to put the lid on before \
starting the blender. \
Tough luck! See ya!
"""

service_style_pirate = """a polite tone that speaks in English Pirate"""

# Format the messages
service_messages = prompt_template.format_messages(
    style=service_style_pirate,
    text=service_reply
)

# Print the formatted prompt
print("Service reply prompt:")
print(service_messages[0].content)
print("\n" + "-"*50 + "\n")

# Call DeepSeek to translate
service_response = chat(service_messages)
print("Service reply translation:")
print(service_response.content)

Output Parsers

Let's start by defining how we would like the LLM output to look:
{
  "gift": False,
  "delivery_days": 5,
  "price_value": "pretty affordable!"
}

customer_review = """\
This leaf blower is pretty amazing.  It has four settings:\
candle blower, gentle breeze, windy city, and tornado. \
It arrived in two days, just in time for my wife's \
anniversary present. \
I think my wife liked it so much she was speechless. \
So far I've been the only one using it, and I've been \
using it every other morning to clear the leaves on our lawn. \
It's slightly more expensive than the other leaf blowers \
out there, but I think it's worth it for the extra features.
"""

review_template = """\
For the following text, extract the following information:

gift: Was the item purchased as a gift for someone else? \
Answer True if yes, False if not or unknown.

delivery_days: How many days did it take for the product \
to arrive? If this information is not found, output -1.

price_value: Extract any sentences about the value or price,\
and output them as a comma separated Python list.

Format the output as JSON with the following keys:
gift
delivery_days
price_value

text: {text}
"""
from langchain.prompts import ChatPromptTemplate

prompt_template = ChatPromptTemplate.from_template(review_template)
print(prompt_template)
messages = prompt_template.format_messages(text=customer_review)
chat = ChatOpenAI(temperature=0.0, model=llm_model)
response = chat(messages)
print(response.content)
type(response.content)
# Running this line raises an error:
# response.content is a string, not a dictionary,
# so it has no .get() method
response.content.get('gift')
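You could parse the string yourself with json.loads, but that is brittle: it only works if the reply is bare JSON, and it breaks the moment the model adds surrounding prose or a markdown fence. A rough illustration (json is from the standard library); the output parsers below handle this more robustly:

import json

try:
    parsed = json.loads(response.content)  # works only if the reply is bare JSON
    print(parsed["gift"], parsed["delivery_days"])
except json.JSONDecodeError as e:
    print(f"Could not parse the model output as JSON: {e}")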

Parse the LLM output string into a Python dictionary

from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
gift_schema = ResponseSchema(name="gift",
                             description="Was the item purchased\
                             as a gift for someone else? \
                             Answer True if yes,\
                             False if not or unknown.")
delivery_days_schema = ResponseSchema(name="delivery_days",
                                      description="How many days\
                                      did it take for the product\
                                      to arrive? If this \
                                      information is not found,\
                                      output -1.")
price_value_schema = ResponseSchema(name="price_value",
                                    description="Extract any\
                                    sentences about the value or \
                                    price, and output them as a \
                                    comma separated Python list.")

response_schemas = [gift_schema, 
                    delivery_days_schema,
                    price_value_schema]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
print(format_instructions)
review_template_2 = """\
For the following text, extract the following information:

gift: Was the item purchased as a gift for someone else? \
Answer True if yes, False if not or unknown.

delivery_days: How many days did it take for the product\
to arrive? If this information is not found, output -1.

price_value: Extract any sentences about the value or price,\
and output them as a comma separated Python list.

text: {text}

{format_instructions}
"""

prompt = ChatPromptTemplate.from_template(template=review_template_2)

messages = prompt.format_messages(text=customer_review, 
                                format_instructions=format_instructions)
print(messages[0].content)
response = chat(messages)
print(response.content)
output_dict = output_parser.parse(response.content)
type(output_dict)
output_dict.get('delivery_days')
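Unlike the raw string earlier, output_dict is a real Python dict, so .get() works. Note that StructuredOutputParser typically returns the values as strings, so downstream code may still need to cast them, for example:

# Cast the parsed value before using it numerically (hypothetical downstream check)
days = int(output_dict.get('delivery_days', -1))
if days >= 0:
    print(f"Delivered in {days} days")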

Reference: LangChain for LLM Application Development - DeepLearning.AI
