Using Accelerate with DeepSpeed

Running accelerate config once writes a default YAML configuration file (by default under ~/.cache/huggingface/accelerate/default_config.yaml), which accelerate launch then picks up automatically.
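A minimal sketch of what that file can look like when DeepSpeed is selected at the prompts (illustrative only: the exact keys vary with the accelerate version and the answers you give; ZeRO stage 2, fp16, and 2 processes are example choices, not requirements):

compute_environment: LOCAL_MACHINE
distributed_type: DEEPSPEED
deepspeed_config:
  gradient_accumulation_steps: 1
  offload_optimizer_device: none
  offload_param_device: none
  zero_stage: 2
machine_rank: 0
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false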

A complete Python example training ResNet-18 on CIFAR-10:
from accelerate import Accelerator
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models

# Initialize the Accelerator
accelerator = Accelerator()
device = accelerator.device

# Data preprocessing: random augmentation for training only,
# plain normalization for evaluation
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Load the datasets
train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)

train_loader = DataLoader(train_dataset, batch_size=2048, shuffle=True, num_workers=4)
test_loader = DataLoader(test_dataset, batch_size=2048, shuffle=False, num_workers=4)

# Define the model (the explicit .to(device) is optional here;
# accelerator.prepare handles placement as well)
model = models.resnet18(num_classes=10).to(device)
print(f'initial model device: {next(model.parameters()).device}')

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

# Wrap the model, optimizer, and data loaders with accelerate
model, optimizer, train_loader, test_loader = accelerator.prepare(model, optimizer, train_loader, test_loader)
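# What prepare() does here: the model is moved to the right device and wrapped
# (DistributedDataParallel for plain multi-GPU, a DeepSpeed engine when the config
# selects DeepSpeed), the optimizer is wrapped accordingly, and each data loader
# is rebuilt so that every process receives its own shard of each batch.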

# Check device placement (only the first batch; no need to iterate the whole loader)
print(f"Model is on device: {next(model.parameters()).device}")
for batch_idx, (inputs, targets) in enumerate(train_loader):
    print(f"batch_{batch_idx} inputs are on device: {inputs.device}")
    print(f"batch_{batch_idx} targets are on device: {targets.device}")
    break

# Training function
def train(epoch):
    model.train()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        print(f'{epoch}:{batch_idx}', accelerator.is_main_process, batch_idx, inputs.device, targets.device, model.device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
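        # accelerator.backward() is used instead of loss.backward(): it delegates
        # to the DeepSpeed engine or the mixed-precision grad scaler when active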
        accelerator.backward(loss)
        optimizer.step()
        if batch_idx % 200 == 0:
            print(f'{accelerator.is_main_process}, Train Epoch: {epoch} [{batch_idx * len(inputs)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}')

# Evaluation function
def test():
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            # Weight the batch-mean loss by the local batch size before gathering,
            # so dividing by the sample count below yields a true per-sample average
            test_loss += accelerator.gather(loss * targets.size(0)).sum().item()
            pred = outputs.argmax(dim=1, keepdim=True)
            correct += accelerator.gather(pred.eq(targets.view_as(pred)).sum()).sum().item()
            total += targets.size(0) * accelerator.num_processes  # global count can be accumulated directly: each process sees the same local batch size
    test_loss /= total
    accuracy = 100. * correct / total
    if accelerator.is_main_process:
        print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{total} ({accuracy:.0f}%)')
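
# Note: recent accelerate versions also offer accelerator.gather_for_metrics(),
# which drops the duplicated samples a distributed sampler pads the last batch
# with, so gathered counts stay exact when the dataset size does not divide evenly.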

# Main training loop
for epoch in range(3):
    train(epoch)
    test()

print("Training completed.")