Instrumenting PyTorch's Distributed Interfaces with a Monkey Patch
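The idea: before the training script issues any `torch.distributed` calls, replace the module's communication functions with thin wrappers that log each invocation and then delegate to the originals. Running the script then prints a trace of the Python-level collective calls, with no changes to PyTorch itself.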

Training script:

```python
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from torch import nn
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
import torch.nn.functional as F
import os
import distributed_patch

# NCCL debug-log environment variables (uncomment to enable)
'''
os.environ["NCCL_DEBUG"] = "INFO"
os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"  # or COLL for collectives only
os.environ["NCCL_LOG_FILE"] = "nccl_log.txt"

# then run the PyTorch distributed code
'''


class Net(nn.Module):  # model definition
    def __init__(self):
        super(Net, self).__init__()
        self.flatten = nn.Flatten()
        self.seq = nn.Sequential(
            nn.Linear(28 * 28, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.flatten(x)
        return self.seq(x)


def main():
    dist.init_process_group(backend='nccl')  # [collective] every process connects to the master and they discover each other

    rank = dist.get_rank()
    world_size = dist.get_world_size()
    device_name = f'cuda:{rank}'  # assumes one process per GPU on a single node (global rank == local GPU index)

    checkpoint = None  # every rank loads the checkpoint itself
    try:
        checkpoint = torch.load('checkpoint.pth', map_location='cpu')  # saved from cuda:0; map to CPU so every rank can load it safely
    except FileNotFoundError:
        pass

    model = Net().to(device_name)
    if checkpoint and rank == 0:  # rank 0 restores the model parameters
        model.load_state_dict(checkpoint['model'])

    model = DDP(model)  # [collective] rank 0 broadcasts its parameters to the other ranks

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # identical parameters imply identical initial optimizer state
    if checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])  # every rank loads the optimizer state itself

    train_dataset = MNIST(root='./data', download=True, transform=ToTensor(), train=True)  # every rank loads the dataset itself
    sampler = DistributedSampler(train_dataset)  # assigns each rank its own shard
    train_dataloader = DataLoader(train_dataset, batch_size=32, sampler=sampler, persistent_workers=True, num_workers=2)

    val_dataset = MNIST(root='./data', download=True, transform=ToTensor(), train=False)
    val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=True, persistent_workers=True, num_workers=2)

    for epoch in range(20):
        sampler.set_epoch(epoch)  # reseeds the shuffle; each rank derives the same permutation locally from seed + epoch

        model.train()
        for x, y in train_dataloader:
            x, y = x.to(device_name), y.to(device_name)
            pred_y = model(x)  # [collective] DDP syncs model buffers from rank 0 (a no-op here: Net has no buffers)
            loss = F.cross_entropy(pred_y, y)
            optimizer.zero_grad()
            loss.backward()  # [collective] each parameter's gradient is all-reduced (averaged across ranks)
            optimizer.step()

        dist.reduce(loss, dst=0)  # [collective] rank 0 receives the sum of every rank's last-batch loss

        if rank == 0:
            train_avg_loss = loss.item() / world_size

            # evaluate on rank 0 only, on the unwrapped model so no DDP logic is triggered
            raw_model = model.module
            val_loss = 0
            with torch.no_grad():
                for x, y in val_dataloader:
                    x, y = x.to(device_name), y.to(device_name)
                    pred_y = raw_model(x)
                    loss = F.cross_entropy(pred_y, y)
                    val_loss += loss.item()
            val_avg_loss = val_loss / len(val_dataloader)
            print(f'train_loss:{train_avg_loss} val_loss:{val_avg_loss}')

            # checkpoint: write to a temp file, then rename so the checkpoint is replaced atomically
            torch.save({'model': model.module.state_dict(), 'optimizer': optimizer.state_dict()}, '.checkpoint.pth')
            os.replace('.checkpoint.pth', 'checkpoint.pth')

        dist.barrier()  # [collective] the other ranks wait for rank 0 to finish eval and checkpointing



if __name__ == '__main__':
    main()

# torchrun --nproc_per_node 1 pytorch_dis_gpu.py
```
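One caveat before looking at the patch: monkey patching only intercepts collectives invoked from Python. DDP does its heavy lifting (the initial parameter broadcast and the gradient all-reduce during `backward()`) inside its C++ reducer, so those operations will not pass through the patched `dist.broadcast`/`dist.all_reduce` wrappers; the explicit `dist.reduce` and `dist.barrier` calls in the loop, and Python-level `get_rank`/`get_world_size` lookups, will. Importing `distributed_patch` alongside the other imports is sufficient, since the patch only needs to run before the first `dist` call.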

Instrumentation script:

```python
import torch.distributed as dist

# keep references to the original functions
original_functions = {
    "init_process_group": dist.init_process_group,
    "all_reduce": dist.all_reduce,
    "reduce": dist.reduce,
    "broadcast": dist.broadcast,
    "barrier": dist.barrier,
    "get_rank": dist.get_rank,
    "get_world_size": dist.get_world_size
}

# instrumented wrappers
def patched_init_process_group(*args, **kwargs):
    print("[distributed] init_process_group called")
    return original_functions["init_process_group"](*args, **kwargs)

def patched_all_reduce(tensor, op=dist.ReduceOp.SUM, group=None, async_op=False):
    print("[distributed] all_reduce called")
    return original_functions["all_reduce"](tensor, op, group, async_op)

def patched_reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, async_op=False):
    print("[distributed] reduce called")
    return original_functions["reduce"](tensor, dst, op, group, async_op)

def patched_broadcast(tensor, src, group=None, async_op=False):
    print("[distributed] broadcast called")
    return original_functions["broadcast"](tensor, src, group, async_op)

def patched_barrier(*args, **kwargs):
    print("[distributed] barrier called")
    return original_functions["barrier"](*args, **kwargs)

def patched_get_rank(*args, **kwargs):
    print("[distributed] get_rank called")
    return original_functions["get_rank"](*args, **kwargs)

def patched_get_world_size(*args, **kwargs):
    print("[distributed] get_world_size called")
    return original_functions["get_world_size"](*args, **kwargs)

# swap the distributed API functions for the instrumented versions
dist.init_process_group = patched_init_process_group
dist.all_reduce = patched_all_reduce
dist.reduce = patched_reduce
dist.broadcast = patched_broadcast
dist.barrier = patched_barrier
dist.get_rank = patched_get_rank
dist.get_world_size = patched_get_world_size
```
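The seven hand-written wrappers follow an identical pattern, so the patch can also be collapsed into a loop. A minimal sketch of that variant (the `_PATCH_TARGETS`/`_make_wrapper` names and the tensor-shape logging are illustrative additions, not part of the original script):

```python
import functools

import torch
import torch.distributed as dist

# names of the dist functions to instrument; extend as needed
_PATCH_TARGETS = [
    "init_process_group", "all_reduce", "reduce",
    "broadcast", "barrier", "get_rank", "get_world_size",
]

def _make_wrapper(name, fn):
    @functools.wraps(fn)  # preserve the original __name__/__doc__
    def wrapper(*args, **kwargs):
        # report tensor shapes so individual collectives are easier to tell apart
        shapes = [tuple(a.shape) for a in args if isinstance(a, torch.Tensor)]
        suffix = f", tensor shapes={shapes}" if shapes else ""
        print(f"[distributed] {name} called{suffix}")
        return fn(*args, **kwargs)  # delegate to the original function
    return wrapper

for _name in _PATCH_TARGETS:
    setattr(dist, _name, _make_wrapper(_name, getattr(dist, _name)))
```

Because the wrappers forward `*args`/`**kwargs` unchanged, defaults such as `op=ReduceOp.SUM` keep working, and the loop version cannot drift out of sync with the real signatures the way hand-copied default arguments might.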