python 如何调用GPT系列的api接口,实现想要的功能

目录

问题描述:

问题解决:


问题描述:

随着各种LLMs (Large Language Models)的出现,如何调用各种LLMs的api成为了经常会遇见的问题。

问题解决:

下面仅以生成给定sentence的复述句为例,说明如何应用。

data: 存放在.txt文件中,按行存放,每一行是一个句子

⚠️: 只能在本地运行,不可以在服务器上运行;并且需要先开启网络代理(🪜),否则无法访问 API 接口。

代码实现:

python 复制代码
import openai
import time
import os
import random

from tqdm import tqdm

# SECURITY: never hard-code (or leave commented-out) real API keys in source
# files -- they leak through version control and blog posts. Load the key from
# an environment variable instead; the placeholder keeps old behavior if the
# variable is unset.
openai.api_key = os.getenv("OPENAI_API_KEY", "YOUR API KEY")  # GPT-4 access
# Third-party relay endpoint used instead of api.openai.com
# (presumably because the official host is unreachable locally -- verify).
openai.api_base = 'https://api.ngapi.top/v1'

def get_response(prompt, temperature=0.5, max_tokens=2048):
    """Send a single-turn chat request to the OpenAI API and return the
    raw ChatCompletion response object.

    Args:
        prompt: user message to send.
        temperature: sampling temperature. Bug fix: this parameter was
            previously accepted but ignored (hard-coded to 0, together
            with top_p=0); it is now actually forwarded to the API.
        max_tokens: cap on completion length. Bug fix: previously
            commented out and never passed.

    Returns:
        The ChatCompletion response; callers read
        response['choices'][0]['message']['content'].
    """
    completion = openai.ChatCompletion.create(
        model="gpt-4",
        temperature=temperature,
        max_tokens=max_tokens,
        messages=[
            {"role": "user", "content": f"{prompt}"}
        ]
    )
    return completion
  
python 复制代码
def generate_prompt(all_sentences): # all_sentences is a list of sentence strings
    """Build one paraphrase prompt per input sentence.

    Args:
        all_sentences: list of sentences to paraphrase.

    Returns:
        A list of prompt strings, one per sentence, in input order.
    """
    return [
        f"Generate a paraphrased version of the following sentence: {sentence}"
        for sentence in all_sentences
    ]
python 复制代码
import re

def process_sentence(sentence):
    # 定义要添加空格的特殊字符, 比如's
    special_chars = [',', '.', '\'', ''', '"', '"', '(', ')', '[', ']', '{', '}', ':', ';', '?', '!'] # '-', 因为sub,obj中存在很多以'-'为连字符的sub,obj,所以原始句子中,这部分不可以加空格
    # 在特殊字符前添加空格
    for char in special_chars:
        if char == '(': # 特别的,左括号是在后面加空格
            sentence = sentence = re.sub(rf'([{char}])', r'\1 ', sentence)
        else:
            sentence = re.sub(rf'([{char}])', r' \1', sentence)
    return sentence

def get_res(all_sentences, wirte_path):
    """Paraphrase every sentence via the API and append results to wirte_path.

    Supports resuming: if the output file already exists, the first
    len(existing lines) prompts are skipped so an interrupted run can
    continue where it left off (one output line per input sentence).

    Args:
        all_sentences: list of input sentences.
        wirte_path: path of the output file. (Name kept as-is, typo and
            all, to preserve the public interface for keyword callers.)
    """
    # Resume support: count lines already written; those prompts are skipped.
    if os.path.exists(wirte_path):
        with open(wirte_path) as res_file:  # context manager: no leaked handle
            begin = len(res_file.readlines()) + 1
    else:
        begin = 0

    # buffering=1 -> line-buffered, so each result is flushed immediately
    # and survives an interruption mid-run.
    with open(wirte_path, 'a', buffering=1) as res_file:
        count = 0
        for p in tqdm(generate_prompt(all_sentences), total=len(all_sentences)):
            count += 1
            if count < begin:  # already processed in a previous run
                continue
            try:
                res = get_response(p)['choices'][0]['message']['content']
                # Split punctuation off with spaces (word/punct tokenization).
                processed_res = process_sentence(res)
                if "The server is overloaded or not ready yet" in processed_res:
                    # Overloaded server: back off, and write a placeholder so
                    # output line numbers stay aligned with input sentences.
                    time.sleep(30)
                    res_file.write("####" + '\n')
                else:
                    res_file.write(processed_res + '\n')
            except openai.error.RateLimitError:
                # Rate limited: wait, then retry the same prompt once.
                time.sleep(30)
                res = get_response(p)['choices'][0]['message']['content']
                res_file.write(process_sentence(res) + '\n')
            # Any other exception now propagates with a full traceback
            # instead of the original print + exit(-1), which hid the cause.
python 复制代码
from pdb import set_trace as stop


# Every (dataset, split) pair to paraphrase.
dataset_list = ['14lap','14res', '15res', '16res']
name_list = ['train', 'test', 'dev']

for dataset in dataset_list:
    for name in name_list:
        sentence_path = f"/Users/qtxu/Workplace/Chatgpt/ATE_Task/{dataset}/{name}_sentence.txt"
        write_path = f'/Users/qtxu/Workplace/Chatgpt/ATE_Task/{dataset}/{name}_paraphrase_sentence.txt'

        # Read all raw lines (trailing newlines kept, exactly as before).
        with open(sentence_path,"r") as fr:
            all_sentences = list(fr)

        get_res(all_sentences, write_path)
相关推荐
工业互联网专业9 分钟前
基于协同过滤算法的小说推荐系统_django+spider
python·django·毕业设计·源码·课程设计·spider·协同过滤算法
星星的月亮叫太阳16 分钟前
large-scale-DRL-exploration 代码阅读 总结
python·算法
Q_Q196328847534 分钟前
python+django/flask基于Echarts+Python的图书零售监测系统设计与实现(带大屏)
spring boot·python·django·flask·node.js·php
深度学习lover1 小时前
<数据集>yolo航拍交通目标识别数据集<目标检测>
人工智能·python·yolo·目标检测·计算机视觉·航拍交通目标识别
程序猿20231 小时前
Python每日一练---第二天:合并两个有序数组
开发语言·python
Coovally AI模型快速验证1 小时前
视觉语言模型(VLM)深度解析:如何用它来处理文档
人工智能·yolo·目标跟踪·语言模型·自然语言处理·开源
权泽谦1 小时前
用 Flask + OpenAI API 打造一个智能聊天机器人(附完整源码与部署教程)
python·机器人·flask
njxiejing1 小时前
Numpy一维、二维、三维数组切片实例
开发语言·python·numpy
小小工匠2 小时前
Vibe Coding - 免费使用claude code 、gpt-5、grok-code-fast-1进行氛围编程
gpt·claude code·grok-code-fast1
lskisme2 小时前
springboot maven导入本地jar包
开发语言·python·pycharm