Using Accelerate with DeepSpeed

Running `accelerate config` generates a default YAML configuration file.
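The file is saved to `~/.cache/huggingface/accelerate/default_config.yaml` unless you pass a different path. A minimal sketch of what it might contain when DeepSpeed ZeRO stage 2 is selected (the exact fields depend on the answers given during `accelerate config`):

```yaml
compute_environment: LOCAL_MACHINE
distributed_type: DEEPSPEED
deepspeed_config:
  zero_stage: 2                   # ZeRO optimization stage
  gradient_accumulation_steps: 1
  offload_optimizer_device: none  # 'cpu' would offload optimizer state
  offload_param_device: none
  zero3_init_flag: false
mixed_precision: fp16
num_machines: 1
num_processes: 2                  # one process per GPU
```

The script below (saved as, say, `train.py`) is then started with `accelerate launch train.py`, which spawns one process per GPU according to this config.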

```python
from accelerate import Accelerator
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models

# Initialize the Accelerator
accelerator = Accelerator()
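# accelerator.device is the device assigned to the current process
# (e.g. cuda:0 on rank 0, cuda:1 on rank 1 in a multi-GPU run).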
device = accelerator.device

# Data preprocessing and augmentation
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# Load the CIFAR-10 datasets
train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)

train_loader = DataLoader(train_dataset, batch_size=2048, shuffle=True, num_workers=4)
test_loader = DataLoader(test_dataset, batch_size=2048, shuffle=False, num_workers=4)
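# Note: batch_size is per process; with Accelerator's default
# split_batches=False, N processes give an effective global batch of N * 2048.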

# Define the model
model = models.resnet18(num_classes=10).to(device)
print(f'initial model device: {next(model.parameters()).device}')

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

# Wrap the model, optimizer, and data loaders with Accelerate
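# prepare() moves the model and optimizer onto the correct device, wraps the
# model for distributed execution (a DeepSpeed engine when DeepSpeed is enabled
# in the config, DDP otherwise), and shards each DataLoader so that every
# process iterates over a different slice of the data.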
model, optimizer, train_loader, test_loader = accelerator.prepare(model, optimizer, train_loader, test_loader)

# Check device placement after prepare()
print(f"Model is on device: {next(model.parameters()).device}")
for batch_idx, (inputs, targets) in enumerate(train_loader):
    print(f"batch_{batch_idx} inputs are on device: {inputs.device}")
    print(f"batch_{batch_idx} targets are on device: {targets.device}")
    

# Training function
def train(epoch):
    model.train()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        print(f'{epoch}:{batch_idx}', accelerator.is_main_process, batch_idx, inputs.device, targets.device, model.device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
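        # accelerator.backward() replaces loss.backward(): it applies loss
        # scaling under mixed precision and delegates to the DeepSpeed engine
        # when DeepSpeed is enabled.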
        accelerator.backward(loss)
        optimizer.step()
        if batch_idx % 200 == 0:
            print(f'{accelerator.is_main_process}, Train Epoch: {epoch} [{batch_idx * len(inputs)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}')

# Evaluation function
def test():
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            outputs = model(inputs)
            loss = criterion(outputs, targets)
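            # gather() concatenates each process's tensor across all processes,
            # so the accumulated sums cover every shard of the test set.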
            # Scale the per-batch mean loss back to a per-sample sum before
            # gathering, so dividing by `total` yields the true average loss.
            test_loss += accelerator.gather(loss * targets.size(0)).sum().item()
            pred = outputs.argmax(dim=1, keepdim=True)
            correct += accelerator.gather(pred.eq(targets.view_as(pred)).sum()).sum().item()
            total += targets.size(0) * accelerator.num_processes  # every process sees the same (padded) batch size
    test_loss /= total
    accuracy = 100. * correct / total
    if accelerator.is_main_process:
        print(f'Test set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{total} ({accuracy:.0f}%)')

# Main training loop
for epoch in range(3):
    train(epoch)
    test()

print("Training completed.")
```
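One caveat: when the test set size is not divisible by the number of processes, the sharded `test_loader` duplicates a few samples so that every process receives a full batch, and the gather-based counting above includes those duplicates. Recent versions of Accelerate (>= 0.12) provide `gather_for_metrics`, which drops the duplicated samples automatically. A minimal sketch of the evaluation loop rewritten with it, reusing the `model` and `test_loader` prepared above (`test_exact` is an illustrative name, not part of the original script):

```python
# Evaluation with gather_for_metrics (Accelerate >= 0.12), which drops the
# samples duplicated to pad the last sharded batch.
def test_exact():
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            preds = model(inputs).argmax(dim=1)
            # Gather predictions and targets from every process, de-duplicated.
            all_preds, all_targets = accelerator.gather_for_metrics((preds, targets))
            correct += (all_preds == all_targets).sum().item()
            total += all_targets.size(0)
    if accelerator.is_main_process:
        print(f'Exact accuracy: {correct}/{total} ({100. * correct / total:.2f}%)')
```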