How to call the GPT-series APIs from Python to get the functionality you want

Table of Contents

Problem description:

Solution:


Problem description:

With the emergence of all kinds of LLMs (Large Language Models), calling the APIs of these LLMs has become a problem one runs into all the time.

Solution:

The following shows how to use the API, taking the generation of a paraphrase for a given sentence as the example task.

Data: stored in a .txt file, one sentence per line.
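
For illustration, here is a minimal sketch of what one input file might hold and how it can be read back, one sentence per line; the file name and the two sentences are made up for this example:

Python:
# Hypothetical contents of train_sentence.txt:
#   The battery life of this laptop is great .
#   The food was tasty but the service was slow .

with open("train_sentence.txt", "r") as fr:          # illustrative path
    all_sentences = [line.strip() for line in fr]    # one sentence per list element
print(len(all_sentences), "sentences loaded")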

⚠️ Note: the script can only be run locally, not on the server, and a proxy/VPN must be turned on.
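
If the script runs behind a local proxy client, the pre-1.0 openai library used here can be pointed at it either through the standard proxy environment variables or through the module-level openai.proxy setting. A minimal sketch, assuming a local proxy listening on 127.0.0.1:7890 (the address and port are only examples):

Python:
import os
import openai

# Option 1: standard environment variables, honored by the underlying requests library
os.environ["HTTP_PROXY"] = "http://127.0.0.1:7890"
os.environ["HTTPS_PROXY"] = "http://127.0.0.1:7890"

# Option 2: configure the proxy on the openai module directly
openai.proxy = "http://127.0.0.1:7890"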

Implementation:

Python:
import openai
import time
import os
import random

from tqdm import tqdm

# openai.api_key = "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"
# openai.api_key = "sk-huUlpXTcDnvNevXgB1399aCa9dB247F599D81737B7CfD927"  # shared by the lab

openai.api_key = "YOUR API KEY"               # GPT-4.0 key
openai.api_base = 'https://api.ngapi.top/v1'  # third-party relay endpoint

def get_response(prompt, temperature=0.5, max_tokens=2048):
    """Send one user prompt to the chat API and return the raw completion object."""
    completion = openai.ChatCompletion.create(
        # model="gpt-3.5-turbo",
        model="gpt-4",
        temperature=temperature,   # pass the caller's sampling settings through
        # top_p=0,                 # alternatively, fix top_p instead of temperature
        max_tokens=max_tokens,
        messages=[
            {"role": "user", "content": prompt}
        ]
    )
    return completion
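
As a quick check, get_response can be called once and the generated text pulled out of the completion object, exactly as the batch code below does; the sentence in the prompt is just a placeholder:

Python:
completion = get_response("Generate a paraphrased version of the following sentence: The battery life is great .")
reply = completion['choices'][0]['message']['content']   # the model's paraphrase as plain text
print(reply)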
  
Python:
def generate_prompt(all_sentences):  # all_sentences is a list of sentences
    prompts = []
    for cur_sent in all_sentences:
        # prompt = "Please generate just one synonymous sentence for the given sentence. The current given sentence is : '{}'".format(cur_sent)
        prompt = f"Generate a paraphrased version of the following sentence: {cur_sent}"
        prompts.append(prompt)
    return prompts
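
To make the prompt format concrete, running the builder on two made-up sentences prints one prompt per sentence:

Python:
sample_sentences = ["The battery life is great .", "The service was slow ."]   # illustrative only
for p in generate_prompt(sample_sentences):
    print(p)
# Generate a paraphrased version of the following sentence: The battery life is great .
# Generate a paraphrased version of the following sentence: The service was slow .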
Python:
import re

def process_sentence(sentence):
    # Special characters that should be separated from words by a space (e.g. the 's in "it's").
    # '-' is deliberately left out: many sub/obj terms in the data are hyphenated, so the
    # original sentences must not get extra spaces around hyphens.
    special_chars = [',', '.', '\'', '’', '“', '”', '(', ')', '[', ']', '{', '}', ':', ';', '?', '!']
    # Insert a space next to each special character
    for char in special_chars:
        if char == '(':  # for an opening parenthesis, the space goes after it
            sentence = re.sub(rf'([{char}])', r'\1 ', sentence)
        else:
            sentence = re.sub(rf'([{char}])', r' \1', sentence)
    return sentence
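
# Example (illustrative input, not from the dataset):
#   process_sentence("It's good (really).")
#   -> "It 's good ( really ) ."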

def get_res(all_sentences, write_path):

    if os.path.exists(write_path):
        res_file = open(write_path)
        begin = len(res_file.readlines()) + 1  # resume from where the last run stopped
        res_file.close()
    else:
        begin = 0

    with open(write_path, 'a', buffering=1) as res_file:
        count = 0
        for p in tqdm(generate_prompt(all_sentences), total=len(all_sentences)):
            print(p)
            count += 1
            if count < begin:  # skip prompts whose results are already in the output file
                continue
            try:
                res = get_response(p)['choices'][0]['message']['content']
                # print("res", res)
                processed_res = process_sentence(res)  # separate words and punctuation with spaces
                # print("processed_res", processed_res)
                if "The server is overloaded or not ready yet" in processed_res:
                    time.sleep(30)
                    res_file.write("####" + '\n')  # placeholder line so the output stays aligned
                else:
                    res_file.write(processed_res + '\n')
            except Exception as e:
                if isinstance(e, openai.error.RateLimitError):
                    time.sleep(30)  # on rate limiting, just wait and retry once
                    res = get_response(p)['choices'][0]['message']['content']
                    processed_res = process_sentence(res)
                    res_file.write(processed_res + '\n')
                else:
                    print(e)
                    exit(-1)
        # print("the number of prompts is:", count)
Python:
from pdb import set_trace as stop


dataset_list = ['14lap','14res', '15res', '16res']
name_list = ['train', 'test', 'dev']

# name_list = ['dev']
# dataset_list = ['14lap']

for dataset in dataset_list:
    for name in name_list:
        # print("dataste", dataset)
        # print("name", name)
        sentence_path = f"/Users/qtxu/Workplace/Chatgpt/ATE_Task/{dataset}/{name}_sentence.txt"
        write_path = f'/Users/qtxu/Workplace/Chatgpt/ATE_Task/{dataset}/{name}_paraphrase_sentence.txt'

        with open(sentence_path,"r") as fr:
            lines = fr.readlines()
            all_sentences = []
            for line in lines:
                all_sentences.append(line)
        # print("all_sentences", all_sentences)
        get_res(all_sentences, write_path)