Adaptive learning refers to an AI agent's ability to dynamically adjust its own behavior, strategy, or model parameters in response to environmental changes, task requirements, or interaction data. The goal is continuous evolution without frequent human intervention.
3.2.1 Online Fine-Tuning: Reinforcement Learning from Human Feedback
Reinforcement Learning from Human Feedback (RLHF) is a technique that incorporates human preferences and feedback into the reinforcement learning process. It has become one of the most prominent methods in current AI agent development.
1. Core Ideas of RLHF
(1) Limitations of traditional RL: traditional reinforcement learning relies on carefully designed reward functions, which are hard to specify in complex real-world scenarios.
(2) Value of human feedback: human preferences or ratings of model outputs guide the learning process.
(3) Two-stage training: pre-train a model, then fine-tune it using human feedback.
2. Key Components of RLHF
(1) Reward model (reward function): learns to convert human preferences into a quantifiable reward signal (a minimal sketch follows this list).
(2) Policy model: optimizes its own behavior according to the reward signal.
(3) Human feedback data: rankings, ratings, or direct corrections.
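To make component (1) concrete before the full workflow: a reward model can be as simple as a linear head that scores a pooled (prompt, response) representation, trained with the standard Bradley-Terry pairwise loss so that human-preferred responses outrank rejected ones. The names below (RewardModel, preference_loss) and the hidden size 768 are illustrative assumptions, not part of Example 3.4:

import torch
import torch.nn as nn
import torch.nn.functional as F

class RewardModel(nn.Module):
    """Scores a pooled (prompt, response) representation with a single linear head."""
    def __init__(self, hidden_size=768):
        super().__init__()
        self.score = nn.Linear(hidden_size, 1)

    def forward(self, hidden_state):
        # hidden_state: [batch, hidden_size] -> one scalar reward per example
        return self.score(hidden_state).squeeze(-1)

def preference_loss(reward_model, h_chosen, h_rejected):
    # Bradley-Terry pairwise loss: the human-preferred response should score higher
    return -F.logsigmoid(reward_model(h_chosen) - reward_model(h_rejected)).mean()

# Toy usage with random features standing in for encoder outputs
rm = RewardModel()
loss = preference_loss(rm, torch.randn(4, 768), torch.randn(4, 768))
loss.backward()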
3. RLHF Implementation Workflow
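The PPO stage in the example below optimizes the standard clipped surrogate objective

L_CLIP(θ) = E[ min( r(θ)·Â, clip(r(θ), 1−ε, 1+ε)·Â ) ],  where r(θ) = π_θ(a|s) / π_θold(a|s)

with advantage estimate Â and clipping range ε. The code uses ε = 0.2 (the torch.clamp(ratio, 0.8, 1.2) call) and, as a deliberate simplification, replaces the true new-to-old probability ratio r(θ) with a stand-in computed from the sampled log-probabilities.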
[Example 3.4] Optimizing a dialogue system with RLHF. Note: this code is complex and resource-intensive, so it may not run to completion locally; it is shown mainly to illustrate the workflow. Because it loads several large models, run it on a machine with sufficient GPU memory.
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModelForCausalLM
import os
import warnings
warnings.filterwarnings('ignore')

# Custom value head: maps the final token's hidden state to a scalar value estimate
class ValueHead(nn.Module):
    def __init__(self, hidden_size):
        super().__init__()
        self.value_head = nn.Linear(hidden_size, 1)

    def forward(self, hidden_states):
        return self.value_head(hidden_states[:, -1, :]).squeeze(-1)

# RLHF training wrapper
class Qwen2RLHF:
    def __init__(self, model_name="Qwen/Qwen-1_8B"):
        print(f"Loading model: {model_name}")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.tokenizer.pad_token = self.tokenizer.eos_token
        # Base model (used for the SFT stage)
        self.base_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True
        )
        # RL (policy) model
        self.rl_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True
        )
        # Value head
        self.value_head = ValueHead(self.rl_model.config.hidden_size).to(self.rl_model.device).half()
        # Optimizer over the policy model and value head
        self.optimizer = optim.AdamW([
            {'params': self.rl_model.parameters()},
            {'params': self.value_head.parameters()}
        ], lr=1e-5)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Using device: {self.device}")

    def supervised_fine_tuning(self, dataset_path, epochs=1, batch_size=2):
        """Simplified SFT stage"""
        print("Loading dataset for SFT...")
        # A tiny demonstration dataset
        demonstrations = [
            {"instruction": "Explain machine learning", "output": "Machine learning is a branch of artificial intelligence that lets computers learn from data without being explicitly programmed."},
            {"instruction": "What is a neural network", "output": "A neural network is a computing system inspired by the human brain, built from interconnected artificial neurons."},
            {"instruction": "Advantages of Python", "output": "Python is easy to learn, has a rich ecosystem of libraries and frameworks, and suits many kinds of application development."},
            {"instruction": "How to learn programming", "output": "Start from basic syntax, keep practicing through real projects, and take part in open-source communities."}
        ]
        # Build training texts
        texts = []
        for demo in demonstrations:
            text = f"Human: {demo['instruction']}\nAssistant: {demo['output']} {self.tokenizer.eos_token}"
            texts.append(text)
        # Tokenize
        encodings = self.tokenizer(texts, padding=True, truncation=True, max_length=128, return_tensors="pt")
        input_ids = encodings["input_ids"]
        labels = input_ids.clone()
        # SFT updates base_model, which self.optimizer does not cover, so it gets its own optimizer
        sft_optimizer = optim.AdamW(self.base_model.parameters(), lr=1e-5)
        # Training loop
        self.base_model.train()
        for epoch in range(epochs):
            total_loss = 0
            for i in range(0, len(input_ids), batch_size):
                batch_input_ids = input_ids[i:i+batch_size].to(self.device)
                batch_labels = labels[i:i+batch_size].to(self.device)
                outputs = self.base_model(input_ids=batch_input_ids, labels=batch_labels)
                loss = outputs.loss
                sft_optimizer.zero_grad()
                loss.backward()
                sft_optimizer.step()
                total_loss += loss.item()
            print(f"SFT Epoch {epoch+1}/{epochs}, Loss: {total_loss/(len(input_ids)/batch_size):.4f}")
        print("SFT completed")

    def ppo_training(self, epochs=1, batch_size=2):
        """Simplified PPO stage"""
        print("Starting simplified PPO training...")
        # Simple prompts
        prompts = [
            "Explain deep learning",
            "What is the Transformer model",
            "How to define a function in Python",
            "How to improve programming skills"
        ]
        # Encode the prompts
        encodings = self.tokenizer(prompts, padding=True, truncation=True, max_length=64, return_tensors="pt")
        input_ids = encodings["input_ids"].to(self.device)
        attention_mask = encodings["attention_mask"].to(self.device)
        # Training loop
        self.rl_model.train()
        self.value_head.train()
        for epoch in range(epochs):
            total_loss = 0
            # Sample responses (no gradients needed for generation)
            with torch.no_grad():
                gen_outputs = self.rl_model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=32,
                    do_sample=True,
                    temperature=0.7,
                    pad_token_id=self.tokenizer.eos_token_id,
                    return_dict_in_generate=True,
                )
            gen_ids = gen_outputs.sequences[:, input_ids.shape[1]:]
            # Compute rewards (simplified: based on response length)
            rewards = torch.tensor([min(len(self.tokenizer.decode(ids))/30, 1.0) for ids in gen_ids],
                                   device=self.device, dtype=torch.float16)
            # Forward pass over the full sequence, with gradients enabled so the
            # policy model and value head can actually be updated
            full_seq = torch.cat([input_ids, gen_ids], dim=1)
            outputs = self.rl_model(full_seq, output_hidden_states=True)
            last_hidden_state = outputs.hidden_states[-1]
            values = self.value_head(last_hidden_state)
            # Advantage estimate, detached so the policy loss does not backpropagate into the value head
            advantages = (rewards - values).detach()
            # Policy loss (clipped surrogate objective, epsilon = 0.2)
            logits = outputs.logits[:, input_ids.shape[1]-1:-1, :]
            log_probs = torch.log_softmax(logits, dim=-1)
            gen_log_probs = torch.gather(log_probs, 2, gen_ids.unsqueeze(2)).squeeze(2)
            ratio = torch.exp(gen_log_probs.mean(dim=1) - 0.5)  # simplified stand-in for the new/old probability ratio
            policy_loss = -torch.min(ratio * advantages, torch.clamp(ratio, 0.8, 1.2) * advantages).mean()
            # Value loss
            value_loss = 0.5 * (values - rewards) ** 2
            value_loss = value_loss.mean()
            # Total loss
            loss = policy_loss + value_loss
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            total_loss += loss.item()
            print(f"PPO Epoch {epoch+1}/{epochs}, Loss: {total_loss:.4f}")
        print("PPO training completed")

    def generate_response(self, prompt):
        """Generate an answer for a single prompt"""
        self.rl_model.eval()
        input_text = f"Human: {prompt}\nAssistant:"
        inputs = self.tokenizer(input_text, return_tensors="pt", truncation=True, max_length=128).to(self.device)
        with torch.no_grad():
            outputs = self.rl_model.generate(
                **inputs,
                max_new_tokens=50,
                do_sample=True,
                temperature=0.7,
                pad_token_id=self.tokenizer.eos_token_id
            )
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response.split("Assistant:")[-1].strip()

# Main program
if __name__ == "__main__":
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    # Initialization
    rlhf = Qwen2RLHF(model_name="Qwen/Qwen-1_8B")
    # Stage 1: SFT
    print("\n=== 1. Supervised Fine-Tuning ===")
    rlhf.supervised_fine_tuning("dummy_path", epochs=2, batch_size=2)
    # Stage 2: PPO
    print("\n=== 2. PPO Training ===")
    rlhf.ppo_training(epochs=2, batch_size=2)
    # Test the final model
    print("\n=== 3. Test Final Model ===")
    test_prompts = [
        "Explain what machine learning is",
        "What is Python",
        "How to learn artificial intelligence"
    ]
    for prompt in test_prompts:
        response = rlhf.generate_response(prompt)
        print(f"Prompt: {prompt}")
        print(f"Response: {response}")
        print("-" * 50)
Running the code produces output like the following:
Loading model: Qwen/Qwen-1_8B
Using device: cuda
=== 1. Supervised Fine-Tuning ===
Loading dataset for SFT...
SFT Epoch 1/2, Loss: 2.3456
SFT Epoch 2/2, Loss: 1.2345
SFT completed
=== 2. PPO Training ===
Starting simplified PPO training...
PPO Epoch 1/2, Loss: 0.1234
PPO Epoch 2/2, Loss: 0.0987
PPO training completed
=== 3. Test Final Model ===
Prompt: Explain what machine learning is
Response: Machine learning is a branch of artificial intelligence that enables computer systems to learn from data and improve without being explicitly programmed.
--------------------------------------------------
Prompt: What is Python
Response: Python is a high-level programming language known for its clean, readable syntax, widely used in web development, data science, artificial intelligence, and other fields.
--------------------------------------------------
Prompt: How to learn artificial intelligence
Response: Learning artificial intelligence requires mathematical foundations (such as linear algebra and probability theory), programming skills (such as Python), and core concepts of machine learning and deep learning.
This simplified version illustrates the basic RLHF workflow, but because it uses a tiny dataset and simplified algorithms, its practical effect is limited. A full implementation requires far more compute and a carefully curated dataset.
4. Future Directions
(1) Multi-agent collaborative RLHF: multiple agents provide feedback to one another.
(2) Meta-learning RLHF: learning how to learn more effectively from human feedback.
(3) Interpretable RLHF: making the decisions of the reward model and policy more transparent.
(4) Cross-cultural RLHF: adapting to human feedback from different cultural backgrounds.
RLHF is evolving rapidly. By combining human judgment with the scale advantages of machine learning, it provides a powerful tool for building AI systems that are safer, more useful, and better aligned with human values.
3.2.2 Parameter-Efficient Fine-Tuning: Adapter and Prefix-Tuning
1. Adapter Tuning
1) How Adapter Tuning works
Adapter Tuning is a parameter-efficient fine-tuning method that adapts a pre-trained model by inserting small trainable neural network modules (called Adapters) between every layer, or between selected layers. Only the Adapters are trained while the original model parameters stay frozen, avoiding the compute and storage overhead of full fine-tuning.
2) Structure of an Adapter module
An Adapter module typically consists of the following parts (summarized as a formula after this list).
(1) Down-projection layer (down-project): projects input features from high dimension d down to low dimension m, reducing the parameter count.
(2) Nonlinear activation: e.g. ReLU or GELU (the code below uses GELU), adding nonlinearity.
(3) Up-projection layer (up-project): maps the low-dimensional features m back up to high dimension d.
(4) Residual connection: adds the Adapter's input directly to its output, keeping training stable.
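Written as a formula, an Adapter computes

h' = h + W_up σ(W_down h),  with W_down ∈ R^(m×d) and W_up ∈ R^(d×m)

so each module adds only about 2dm parameters (plus biases), far fewer than the d²-scale weight matrices of the frozen Transformer layers around it.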
3) Adapter Tuning in Python
[Example 3.5] An Adapter Tuning implementation in PyTorch, using BERT as the backbone. To avoid import errors, install these dependency versions:
pip install torch==2.0.1 transformers==4.30.2 datasets==2.14.5 scikit-learn==1.3.0
# adapter_tuning.py
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer, BertModel
# AdamW import path for Transformers 4.x (pinned above; removed in newer releases)
from transformers import AdamW, get_linear_schedule_with_warmup
from sklearn.metrics import accuracy_score
from datasets import load_dataset

# -------------------------------
# 1. Adapter module
# -------------------------------
class Adapter(nn.Module):
    def __init__(self, hidden_size=768, bottleneck=64):
        super(Adapter, self).__init__()
        self.down_project = nn.Linear(hidden_size, bottleneck)
        self.non_linear = nn.GELU()
        self.up_project = nn.Linear(bottleneck, hidden_size)
        self.dropout = nn.Dropout(0.1)

    def forward(self, x):
        residual = x
        x = self.down_project(x)
        x = self.non_linear(x)
        x = self.up_project(x)
        x = self.dropout(x)
        return x + residual  # residual connection

# -------------------------------
# 2. BERT with Adapters
# -------------------------------
class BertWithAdapter(nn.Module):
    def __init__(self, num_labels=2, adapter_bottleneck=64):
        super(BertWithAdapter, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.num_labels = num_labels
        self.adapter_bottleneck = adapter_bottleneck
        # Freeze the BERT backbone
        for param in self.bert.parameters():
            param.requires_grad = False
        # One Adapter per Transformer layer
        self.adapters = nn.ModuleList([
            Adapter(hidden_size=768, bottleneck=adapter_bottleneck)
            for _ in range(12)  # BERT base has 12 layers
        ])
        # Classification head
        self.classifier = nn.Linear(768, num_labels)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=True  # all hidden states (13 layers: embedding + 12 Transformer)
        )
        # Hidden states of the 12 Transformer layers (skip the embedding layer at index 0)
        transformer_hidden_states = outputs.hidden_states[1:]  # list of 12, each [batch, seq_len, 768]
        # Apply each layer's Adapter to that layer's hidden states
        adapted_hidden = [adapter(hidden_state)
                          for hidden_state, adapter in zip(transformer_hidden_states, self.adapters)]
        # Use the Adapter output of the last layer
        final_hidden = adapted_hidden[-1]  # [batch, seq_len, 768]
        # Take the [CLS] token representation
        pooled_output = final_hidden[:, 0]  # [batch, 768]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits

# -------------------------------
# 3. Dataset handling
# -------------------------------
class SSTDataset(Dataset):
    def __init__(self, texts, labels, tokenizer, max_length=64):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = str(self.texts[idx])
        label = self.labels[idx]
        encoding = self.tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=self.max_length,
            return_tensors='pt'
        )
        return {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'labels': torch.tensor(label, dtype=torch.long)
        }

# -------------------------------
# 4. Main training loop
# -------------------------------
def train():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")
    # Load a small subset so CPU training stays fast
    dataset = load_dataset('glue', 'sst2')
    train_data = dataset['train'].select(range(1000))     # 1,000 training samples
    val_data = dataset['validation'].select(range(200))   # 200 validation samples
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertWithAdapter(num_labels=2, adapter_bottleneck=64).to(device)
    # Count trainable parameters (Adapters and classifier head only)
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Trainable parameters: {trainable_params:,}")
    # Data loaders
    train_dataset = SSTDataset(train_data['sentence'], train_data['label'], tokenizer)
    val_dataset = SSTDataset(val_data['sentence'], val_data['label'], tokenizer)
    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=16)
    # Optimizer with per-group learning rates
    optimizer = AdamW([
        {'params': model.adapters.parameters(), 'lr': 3e-4},    # higher LR for the Adapters
        {'params': model.classifier.parameters(), 'lr': 2e-5}   # classifier LR matches BERT fine-tuning
    ])
    # Learning-rate scheduler
    num_epochs = 3
    num_training_steps = len(train_loader) * num_epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=100,
        num_training_steps=num_training_steps
    )
    # Training loop
    model.train()
    for epoch in range(num_epochs):
        total_loss = 0
        for batch in train_loader:
            optimizer.zero_grad()
            # Move tensors to the target device
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)
            # Forward pass
            logits = model(input_ids, attention_mask)
            # Loss
            loss = nn.CrossEntropyLoss()(logits, labels)
            # Backward pass and parameter update
            loss.backward()
            optimizer.step()
            scheduler.step()
            total_loss += loss.item()
        # Epoch loss
        avg_loss = total_loss / len(train_loader)
        print(f"Epoch {epoch+1}/{num_epochs}, Average Loss: {avg_loss:.4f}")
        # Validation
        model.eval()
        val_preds, val_true = [], []
        with torch.no_grad():  # disable gradient computation
            for batch in val_loader:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                labels = batch['labels'].to(device)
                logits = model(input_ids, attention_mask)
                preds = torch.argmax(logits, dim=1)  # most probable class
                val_preds.extend(preds.cpu().numpy())
                val_true.extend(labels.cpu().numpy())
        # Validation accuracy
        val_acc = accuracy_score(val_true, val_preds)
        print(f"Validation Accuracy: {val_acc:.4f}\n")
        model.train()  # back to training mode
    print("✅ Adapter Tuning Training Finished!")

if __name__ == "__main__":
    train()
Running the code produces output like the following:
Using device: cuda
Trainable parameters: 1,847,874
Epoch 1/3, Average Loss: 0.5842
Validation Accuracy: 0.7850
Epoch 2/3, Average Loss: 0.4216
Validation Accuracy: 0.8200
Epoch 3/3, Average Loss: 0.3521
Validation Accuracy: 0.8450
✅ Adapter Tuning Training Finished!
Notes on running the example:
- Model download problems: if BertTokenizer.from_pretrained('bert-base-uncased') fails due to network issues, download the model files manually (https://huggingface.co/bert-base-uncased) and load from a local path, e.g. tokenizer = BertTokenizer.from_pretrained('./bert-base-uncased'). Since BertWithAdapter is a plain nn.Module without its own from_pretrained method, also point the BertModel.from_pretrained call inside BertWithAdapter.__init__ at the same local path.
- CPU environment: the code uses a small sample (1,000 training examples), so it runs on CPU; expect roughly 5 to 10 minutes per epoch.
- GPU environment: with an NVIDIA card, CUDA acceleration is used automatically; expect roughly 30 seconds to 1 minute per epoch.
- First run: the bert-base-uncased model (about 400 MB) and the SST-2 dataset are downloaded automatically, so a working network connection is required.
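Since only the Adapters and the classifier head are trained, only those weights need to be persisted. A minimal sketch of saving and reloading them (the helper names save_adapter_weights and load_adapter_weights are our own, not part of the example above):

import torch

def save_adapter_weights(model, path="adapter_weights.pt"):
    # Persist only the trainable parts: Adapters and classifier (a few MB instead of ~400 MB)
    trainable = {k: v for k, v in model.state_dict().items()
                 if k.startswith("adapters.") or k.startswith("classifier.")}
    torch.save(trainable, path)

def load_adapter_weights(model, path="adapter_weights.pt"):
    # strict=False leaves the frozen BERT backbone weights untouched
    model.load_state_dict(torch.load(path), strict=False)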
2. Prefix-Tuning
Prefix-Tuning is a fine-tuning method that steers a model's output by prepending trainable prefix embeddings to the input layer. The prefix embeddings are concatenated with the original input and fed into the model together, while every other part of the model stays unchanged.
1) Key characteristics
- Only the prefix parameters are trained (typically 10 to 100 tokens).
- The original model architecture is not modified.
- Well suited to text-generation tasks.
2) Advantages
- Efficiency: only the prefix-embedding parameters are trained, so storage overhead is minimal (quantified by the formula after this list).
- Flexibility: applicable to generation tasks such as text summarization and dialogue generation.
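Written as a formula, only the prefix matrix P ∈ R^(l×d) is trained:

h = LM([P; e(x)])

where l is the prefix length, d the embedding dimension, and e(x) the embedded input. With l = 10 and d = 768, as in the GPT-2 example below, the trainable budget is just 7,680 parameters.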
3) Prefix-Tuning in Python
[Example 3.6] A program demonstrating this fine-tuning method: prefix_tuning_gpt2.py.
import torch
import torch.nn as nn
from transformers import GPT2Model, GPT2Tokenizer

class PrefixGPT2(nn.Module):
    def __init__(self, model_name, prefix_length):
        super(PrefixGPT2, self).__init__()
        # Load the GPT-2 configuration and weights
        self.gpt2 = GPT2Model.from_pretrained(model_name)
        self.prefix_length = prefix_length
        config = self.gpt2.config
        self.n_embd = config.n_embd  # embedding dimension
        # Learnable prefix embeddings
        self.prefix_embeddings = nn.Parameter(torch.randn(prefix_length, self.n_embd))
        # Freeze the original GPT-2 parameters (optional; saves memory)
        for param in self.gpt2.parameters():
            param.requires_grad = False

    def forward(self, input_ids, attention_mask):
        batch_size = input_ids.shape[0]
        # Expand the prefix embeddings to the batch
        prefix_embed = self.prefix_embeddings.unsqueeze(0).expand(batch_size, -1, -1)  # [batch, prefix_len, n_embd]
        # Convert input_ids to token embeddings
        input_embed = self.gpt2.wte(input_ids)  # token embeddings
        positional_embed = self.gpt2.wpe(
            torch.arange(0, input_ids.size(1), device=input_ids.device)
        ).unsqueeze(0).expand(batch_size, -1, -1)
        # Concatenate prefix and input embeddings
        combined_embed = torch.cat([prefix_embed, input_embed + positional_embed], dim=1)
        # Build the combined attention mask
        prefix_mask = torch.ones(batch_size, self.prefix_length, dtype=torch.long, device=input_ids.device)
        combined_attention_mask = torch.cat([prefix_mask, attention_mask], dim=1)
        # Run GPT-2 on the combined embeddings
        outputs = self.gpt2(
            inputs_embeds=combined_embed,
            attention_mask=combined_attention_mask,
            output_hidden_states=False,
            output_attentions=False
        )
        return outputs.last_hidden_state  # [batch_size, prefix_len + seq_len, hidden_size]

# ================================
# Example: training / inference flow
# ================================
def main():
    # Settings
    model_name = "gpt2"
    prefix_length = 10
    max_length = 64
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Initialize model and tokenizer
    model = PrefixGPT2(model_name, prefix_length)
    model.to(device)
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    # A pad_token must be set, otherwise batched processing fails
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    # Example inputs (multiple sentences supported)
    sentences = [
        "Hello, how are you?",
        "Prefix-Tuning is a great method for adapting large language models.",
        "GPT-2 is powerful."
    ]
    # Tokenize
    inputs = tokenizer(
        sentences,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=max_length
    )
    input_ids = inputs["input_ids"].to(device)
    attention_mask = inputs["attention_mask"].to(device)
    # Forward pass
    with torch.no_grad():  # inference
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    print(f"Input shape: {input_ids.shape}")   # [batch, seq_len]
    print(f"Output shape: {outputs.shape}")    # [batch, prefix_len + seq_len, hidden_size]
    # Take the output at the last valid (non-pad) token of the first sentence
    last_hidden_states = outputs[0, attention_mask[0].sum().item() + prefix_length - 1, :]
    print(f"Last hidden state shape: {last_hidden_states.shape}")
    # To generate with an LM head on top:
    lm_head = torch.nn.Linear(model.n_embd, tokenizer.vocab_size).to(device)
    lm_head.weight = model.gpt2.wte.weight  # weight tying (standard practice)
    logits = lm_head(outputs[:, prefix_length:, :])  # predict only over the original input positions
    predictions = torch.argmax(logits, dim=-1)
    # Decode the predictions
    predicted_tokens = tokenizer.batch_decode(predictions, skip_special_tokens=True)
    print("Original inputs:")
    for s in sentences:
        print(f"  {s}")
    print("\nModel predictions (greedy decode):")
    for p in predicted_tokens:
        print(f"  {p}")
    # Optional: a single training step
    print("\n--- Training Step Example ---")
    model.train()
    lm_head.train()
    # Objective: predict the next token (standard language modeling)
    labels = input_ids.clone()
    with torch.enable_grad():
        outputs = model(input_ids=input_ids, attention_mask=attention_mask)
        logits = lm_head(outputs[:, prefix_length:-1, :])  # predictions for positions 0 .. n-2
        loss_fct = torch.nn.CrossEntropyLoss()
        shift_labels = labels[:, 1:]  # shift targets by one position
        loss = loss_fct(logits.view(-1, tokenizer.vocab_size), shift_labels.reshape(-1))
        print(f"Training loss: {loss.item():.4f}")
        # Backward pass (for demonstration only)
        loss.backward()
    # Check that the prefix embeddings received gradients
    print(f"Prefix embeddings grad norm: {model.prefix_embeddings.grad.norm().item() if model.prefix_embeddings.grad is not None else None}")

if __name__ == "__main__":
    main()
Running the code produces output like the following:
Input shape: torch.Size([3, 15])
Output shape: torch.Size([3, 25, 768])
Last hidden state shape: torch.Size([768])
Original inputs:
Hello, how are you?
Prefix-Tuning is a great method for adapting large language models.
GPT-2 is powerful.
Model predictions (greedy decode):
Hello, how are you
Prefix-Tuning is a great method for adapting large language models
GPT-2 is powerful
--- Training Step Example ---
Training loss: 2.1043
Prefix embeddings grad norm: 0.3421
Parameter-efficient fine-tuning lets large language models adapt to downstream tasks quickly under limited resources. Adapter and Prefix-Tuning, the two mainstream approaches, each have their strengths:
- Adapter inserts small network modules for task adaptation and suits tasks that need precise control.
- Prefix-Tuning optimizes a continuous prefix to steer the model and is better suited to generative tasks.
In practice, choose a method according to the task, resource constraints, and your familiarity with the technique; the sketch below compares their trainable-parameter budgets.
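A rough back-of-the-envelope sketch of that trade-off, assuming BERT-base/GPT-2-scale dimensions as in the two examples above (d = 768, 12 layers; biases ignored):

d, layers = 768, 12            # hidden size and layer count (BERT-base scale)

m = 64                         # Adapter bottleneck width, one Adapter per layer
adapter_params = layers * 2 * d * m   # down- plus up-projection: about 1.18M

l = 10                         # prefix length
prefix_params = l * d                 # 7,680

print(f"Adapter (m={m}): {adapter_params:,} trainable params")
print(f"Prefix (l={l}): {prefix_params:,} trainable params")

The two budgets differ by roughly two orders of magnitude, which is one reason Prefix-Tuning is attractive when many task-specific variants of one frozen model must be stored.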
