【LLM fine-tuning: train Code Llama on your own dataset, a small example】

This is also a general-purpose recipe: fine-tuning an LLM with PEFT.

Prepare your own dataset

Adapt this to your own data as needed. The format is JSONL with three fields: context, answer, question.
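
For example, each line of the file is a single JSON object (illustrative values):

{"context": "", "question": "Write a function that sums an array", "answer": "def sum_array(a): return sum(a)"}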

python
import pandas as pd
import random
import json


# dataset.csv has a 'prompt' column (the question) and a 'Code' column (the answer)
data = pd.read_csv('dataset.csv')
train_data = data[['prompt', 'Code']]
train_data = train_data.values.tolist()

random.shuffle(train_data)

# 80/20 train/validation split
train_num = int(0.8 * len(train_data))

with open('train_data.jsonl', 'w') as f:
    for d in train_data[:train_num]:
        d = {
            'context':'',
            'question':d[0],
            'answer':d[1]
        }
        f.write(json.dumps(d)+'\n')
with open('val_data.jsonl', 'w') as f:
    for d in train_data[train_num:]:
        d = {
            'context':'',
            'question':d[0],
            'answer':d[1]
        }
        f.write(json.dumps(d)+'\n')
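
A quick sanity check (a minimal sketch, assuming the two files written above) confirms the split sizes and record format:

python
# Count records and preview the first training example
with open('train_data.jsonl') as f:
    train_lines = f.readlines()
with open('val_data.jsonl') as f:
    val_lines = f.readlines()
print(len(train_lines), len(val_lines))  # roughly an 80/20 split
print(json.loads(train_lines[0]))        # {'context': '', 'question': ..., 'answer': ...}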

Initialization

python
from datetime import datetime
import os
import sys

import torch

from peft import (
    LoraConfig,
    get_peft_model,
    get_peft_model_state_dict,
    prepare_model_for_int8_training,  # renamed to prepare_model_for_kbit_training in newer peft releases
)
from transformers import (AutoTokenizer, AutoModelForCausalLM,
                          TrainingArguments, Trainer, DataCollatorForSeq2Seq)

# Load our own dataset
from datasets import load_dataset

# load_dataset('json', ...) exposes a single-file dataset under the split name 'train'
train_dataset = load_dataset('json', data_files='train_data.jsonl', split='train')
eval_dataset = load_dataset('json', data_files='val_data.jsonl', split='train')

# Load the base model (load_in_8bit requires the bitsandbytes package)
base_model = 'CodeLlama-7b-Instruct-hf'

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
    low_cpu_mem_usage=True
)

tokenizer = AutoTokenizer.from_pretrained(base_model)
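
Optionally, check how much GPU memory the 8-bit model occupies (get_memory_footprint is a standard transformers method):

python
# Print the model's memory footprint in GB
print(f"Model memory: {model.get_memory_footprint() / 1e9:.1f} GB")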

Results before fine-tuning

python
tokenizer.pad_token = tokenizer.eos_token  # Llama tokenizers ship without a pad token
prompt = """You are a programming assistant.

Now answer the question:

{}"""
prompts = [prompt.format(train_dataset[i]['question']) for i in [1,20,32,45,67]]

model_input = tokenizer(prompts, return_tensors="pt", padding=True).to("cuda")


model.eval()
with torch.no_grad():
    outputs = model.generate(**model_input, max_new_tokens=300)
    outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)

print(outputs)

Fine-tune the model

python
tokenizer.add_eos_token = True   # append EOS to each example so the model learns where to stop
tokenizer.pad_token_id = 0       # pad with token id 0 (<unk>), distinct from EOS
tokenizer.padding_side = "left"  # decoder-only models are padded on the left


def tokenize(prompt):
    result = tokenizer(
        prompt,
        truncation=True,
        max_length=512,
        padding=False,
        return_tensors=None,
    )

    # "self-supervised learning" means the labels are also the inputs:
    result["labels"] = result["input_ids"].copy()

    return result


def generate_and_tokenize_prompt(data_point):
    full_prompt = f"""You are a powerful programming model. Your job is to answer questions about a database. You are given a question.

You must output the code that answers the question.

### Input:
{data_point["question"]}

### Response:
{data_point["answer"]}
"""
    return tokenize(full_prompt)


tokenized_train_dataset = train_dataset.map(generate_and_tokenize_prompt)
tokenized_val_dataset = eval_dataset.map(generate_and_tokenize_prompt)
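
# Note: the mapped datasets still carry the original string columns (context,
# question, answer). Trainer drops columns the model's forward() does not accept
# (remove_unused_columns=True by default); if the collator complains about string
# fields, you can instead drop them explicitly at map time (optional variant):
# tokenized_train_dataset = train_dataset.map(
#     generate_and_tokenize_prompt,
#     remove_columns=train_dataset.column_names,
# )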


model.train() # put model back into training mode
model = prepare_model_for_int8_training(model)

config = LoraConfig(
    r=16,
    lora_alpha=16,
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
    ],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
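
# Optional sanity check: print_trainable_parameters (a standard PeftModel method)
# shows that only a small fraction of weights are trainable with r=16 LoRA
model.print_trainable_parameters()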

# keeps Trainer from trying its own DataParallelism when more than 1 gpu is available
if torch.cuda.device_count() > 1:
    model.is_parallelizable = True
    model.model_parallel = True



batch_size = 128                   # effective batch size
per_device_train_batch_size = 32
gradient_accumulation_steps = batch_size // per_device_train_batch_size  # 128 // 32 = 4
output_dir = "code-llama-ft"

training_args = TrainingArguments(
    per_device_train_batch_size=per_device_train_batch_size,
    gradient_accumulation_steps=gradient_accumulation_steps,
    warmup_steps=100,
    max_steps=400,
    learning_rate=3e-4,
    fp16=True,
    logging_steps=10,
    optim="adamw_torch",
    evaluation_strategy="steps",
    save_strategy="steps",
    eval_steps=20,
    save_steps=20,
    output_dir=output_dir,
    load_best_model_at_end=False,
    group_by_length=True,  # group sequences of similar length together to speed up training
    report_to="none",      # set to "wandb" to log to Weights & Biases
    run_name=f"codellama-{datetime.now().strftime('%Y-%m-%d-%H-%M')}",
)

trainer = Trainer(
    model=model,
    train_dataset=tokenized_train_dataset,
    eval_dataset=tokenized_val_dataset,
    args=training_args,
    # DataCollatorForSeq2Seq pads input_ids and labels (labels with -100, which the loss ignores)
    data_collator=DataCollatorForSeq2Seq(
        tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
    ),
)

Start training

python
model.config.use_cache = False  # the KV cache is useless during training; re-enable it for inference

# Patch state_dict so that checkpoints contain only the small LoRA adapter weights
old_state_dict = model.state_dict
model.state_dict = (lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())).__get__(
    model, type(model)
)
if torch.__version__ >= "2" and sys.platform != "win32":
    print("compiling the model")
    model = torch.compile(model)
trainer.train()
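
With save_steps=20 the Trainer writes adapters to output_dir/checkpoint-* subdirectories. To load the adapter straight from output_dir as the test section below does, it helps to save it explicitly once training finishes (a small addition, not in the original script):

python
# Save the final LoRA adapter into output_dir itself
trainer.model.save_pretrained(output_dir)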

Test the fine-tuned model

python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model = 'CodeLlama-7b-Instruct-hf'
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(base_model)


# Attach the trained LoRA adapter to the base model
output_dir = "code-llama-ft"
model = PeftModel.from_pretrained(model, output_dir)


eval_prompt = """You are a powerful programming model. Your job is to answer questions about a database. You are given a question.

You must output the code that answers the question.

### Input:
Write a function in Java that takes an array and returns the sum of the numbers in the array, or 0 if the array is empty. Except the number 13 is very unlucky, so it does not count any 13, or any number that immediately follows a 13.

### Response:
"""

model_input = tokenizer(eval_prompt, return_tensors="pt").to("cuda")

model.eval()
with torch.no_grad():
    outputs = model.generate(**model_input, max_new_tokens=100)[0]
print(tokenizer.decode(outputs, skip_special_tokens=True))
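
For deployment, the LoRA weights can optionally be merged back into the base model so inference no longer needs peft. A minimal sketch (merge_and_unload is a standard PeftModel method; merging requires the base weights in fp16, not 8-bit):

python
# Merge the adapter into a full fp16 model and save it standalone
base = AutoModelForCausalLM.from_pretrained(
    base_model, torch_dtype=torch.float16, device_map="auto"
)
merged = PeftModel.from_pretrained(base, output_dir).merge_and_unload()
merged.save_pretrained("code-llama-ft-merged")
tokenizer.save_pretrained("code-llama-ft-merged")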

Main reference: https://zhuanlan.zhihu.com/p/660933421
