NCCL Collective Communication Operator Demos and Performance Tests

The following code measures the latency of the main NCCL collective operators and verifies that each one produces the expected result.

1. Reproduction code

tee ccl_benchmark.py <<-'EOF'
import os
import torch
import argparse
import torch.distributed as dist
from torch.distributed import ReduceOp
from datetime import datetime
import time
import numpy as np
dev_type="cuda"
 
class Timer:
    def __init__(self,duration):        
        self.duration=duration
  
    def __enter__(self):
        dist.barrier()
        self.beg= datetime.now().timestamp() * 1e6
 
    def __exit__(self, exc_type, exc_val, exc_tb):
        dist.barrier()
        self.end=datetime.now().timestamp() * 1e6
        self.duration.append(self.end-self.beg)
 
op_mapping={}
class ccl_benchmark:
    def __init__(self,func):
        global op_mapping  
        op_mapping[func.__name__]=func
        self.func=func
         
    def __call__(self,*args,**kwargs):
        return self.func(*args,**kwargs)
         
@ccl_benchmark
def all_gather(shape,device,rank,world_size,iters=5):
    '''
    Concatenate every rank's input_tensor along dim 0; each rank ends up with the full result.
    '''
    duration=[]
    input_tensor=(torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*(100+rank)).to(device)
    gather_list=[torch.zeros((shape[0]//world_size,shape[1]),dtype=torch.int64).to(device) for _ in range(world_size)]
    for _ in range(iters):
        with Timer(duration):
            dist.all_gather(gather_list,input_tensor)   
    output=torch.cat(gather_list,dim=0)
    gt=[torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*(100+i) for i in range(world_size)]
    gt=torch.cat(gt,dim=0)
    return duration,(output.cpu()==gt).all()
    
@ccl_benchmark
def scatter(shape,device,rank,world_size,iters=5):
    '''
    Rank 0 scatters scatter_list; every rank receives scatter_list[rank] into output_tensor.
    '''
    duration=[]
    output_tensor=torch.zeros((shape[0]//world_size,shape[1]),dtype=torch.int64).to(device)
    scatter_list=[(torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*i).to(device) for i in range(world_size)]
    for _ in range(iters):
        with Timer(duration):
            if rank == 0:
                dist.scatter(output_tensor,scatter_list=scatter_list,src =0)
            else:
                dist.scatter(output_tensor,src  = 0)
    gt=torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*rank
    return duration,(output_tensor.cpu()==gt).all()
   
@ccl_benchmark
def gather(shape,device,rank,world_size,iters=5):
    '''
    Concatenate every rank's input_tensor along dim 0, but only on the destination rank (dst=0).
    '''
    duration=[]
    input_tensor=(torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*(100+rank)).to(device)
    gather_list=[torch.zeros((shape[0]//world_size,shape[1]),dtype=torch.int64).to(device) for _ in range(world_size)]
    for _ in range(iters):
        with Timer(duration):
            if rank == 0:
                dist.gather(input_tensor,gather_list=gather_list,dst=0)
            else:
                dist.gather(input_tensor,dst=0)
    ret=True
    if rank==0:
        output=torch.cat(gather_list,dim=0)
        gt=[torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*(100+i) for i in range(world_size)]
        gt=torch.cat(gt,dim=0)
        ret=(output.cpu()==gt).all()
    return duration,ret
  
@ccl_benchmark
def reduce(shape,device,rank,world_size,iters=5):
    '''
    Element-wise sum of input_tensor across all ranks; only the destination rank (dst=0) holds the result.
    '''
    duration=[]   
    for _ in range(iters):
        input_tensor=(torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+rank)).to(device)
        # input_tensor is reduced in place, so recreate it on every iteration
        with Timer(duration):
            dist.reduce(input_tensor,dst=0,op=dist.ReduceOp.SUM)
    ret=True
    if rank==0:
        gt=[torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+i) for i in range(world_size)]
        gt_=gt[0]       
        for i in range(1,world_size):
            gt_=gt_+gt[i]
        ret=(input_tensor.cpu()==gt_).all()
    return duration,ret
         
@ccl_benchmark
def broadcast(shape,device,rank,world_size,iters=5):
    '''
    Broadcast the tensor from the src rank (rank 0) to every other rank.
    '''
    duration=[]   
    for _ in range(iters):
        input_tensor=(torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+rank)).to(device)
        with Timer(duration):
            dist.broadcast(input_tensor,src=0)
 
    gt=(torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+0)).to('cpu')
    ret=(input_tensor.cpu()==gt).all()
    return duration,ret
  
@ccl_benchmark
def p2p(shape,device,rank,world_size,iters=5):
    '''
    Chained point-to-point: each rank receives from rank-1 and forwards to rank+1, so rank 0's data reaches every rank.
    '''
    duration=[]   
    for _ in range(iters):
        input_tensor=(torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+rank)).to(device)
        with Timer(duration):
            if rank!=0:
                dist.recv(input_tensor,rank-1)               
            if rank!=world_size-1:               
                dist.send(input_tensor,dst=rank+1)   
 
    gt=(torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+0)).to('cpu')
    ret=(input_tensor.cpu()==gt).all()
    return duration,ret
 
@ccl_benchmark
def all_reduce(shape,device,rank,world_size,iters=5):
    '''
    Element-wise sum of input_tensor across all ranks; every rank holds the reduced result.
    '''
    duration=[]   
    for _ in range(iters):
        input_tensor=(torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+rank)).to(device)
        # input_tensor is reduced in place, so recreate it on every iteration
        with Timer(duration):
            dist.all_reduce(input_tensor,op=dist.ReduceOp.SUM)
 
    gt=[torch.ones((shape[0],shape[1]),dtype=torch.int64)*(100+i) for i in range(world_size)]
    gt_=gt[0]       
    for i in range(1,world_size):
        gt_=gt_+gt[i]
    ret=(input_tensor.cpu()==gt_).all()
    return duration,ret
 
@ccl_benchmark
def reduce_scatter(shape,device,rank,world_size,iters=5):
    '''
    Sum input_list element-wise across ranks, then scatter: rank r receives reduced chunk r into output_tensor.
    '''
    duration=[]
    output_tensor=torch.zeros((shape[0]//world_size,shape[1]),dtype=torch.int64).to(device)
    input_list=[(torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*(100*rank)+chunk_id).to(device) for chunk_id in range(world_size)]
    for _ in range(iters):
        with Timer(duration):
            dist.reduce_scatter(output_tensor,input_list=input_list,op=dist.ReduceOp.SUM)
     
    gt_list=[(torch.ones((shape[0]//world_size,shape[1]),dtype=torch.int64)*(100*rk)+rank).to('cpu') for rk in range(world_size)]
    gt_=gt_list[0]       
    for i in range(1,world_size):
        gt_=gt_+gt_list[i]    
    return duration,(output_tensor.cpu()==gt_).all()
     
def main():
    dist.init_process_group(backend='nccl')

    if not torch.distributed.is_initialized():
        return
         
    parser = argparse.ArgumentParser(description='test')
    parser.add_argument('--shape', type=str, default="(1024,8192)", help='Tensor shape as a Python tuple, e.g. "(1024,8192)".')
    parser.add_argument('--iters', type=int, default=5, help='Number of timed iterations per operator.')
    parser.add_argument('--op', type=str, default="", help='Collective operator to benchmark (e.g. all_gather, all_reduce).')
    args = parser.parse_args()
     
    global op_mapping
 
    if args.op in op_mapping:
        torch.manual_seed(1)
        world_size = torch.distributed.get_world_size()
        rank = torch.distributed.get_rank()
        local_rank=int(os.environ['LOCAL_RANK'])
        torch.cuda.set_device(local_rank)
        device = torch.device(dev_type,local_rank)
        shape=eval(args.shape)
        duration,passed=op_mapping[args.op](shape,device,rank,world_size,args.iters)
        time.sleep(0.1*rank)
        print("rank:{} op:{} shape:{} iters:{} mean(us):{:.3f} passed:{}".format(rank,args.op,shape,args.iters,np.mean(duration[len(duration)//2:]),passed))
 
    dist.destroy_process_group()
         
if __name__=='__main__':
    main()
         
EOF
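 
The Timer above measures host wall-clock time between two dist.barrier() calls. Because NCCL collectives are launched asynchronously on the CUDA stream, per-rank device time can also be read with CUDA events; the helper below is a minimal sketch of that idea (the name cuda_time_us and the lambda argument are illustrative, not part of the script above).

import torch

def cuda_time_us(fn):
    # Time a single collective on the current CUDA stream with events.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    fn()                      # e.g. lambda: dist.all_reduce(input_tensor)
    end.record()
    torch.cuda.synchronize()  # wait for the kernel to finish before reading the timer
    return start.elapsed_time(end) * 1e3  # elapsed_time() returns milliseconds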
 
export NCCL_DEBUG=error         # NCCL log verbosity
export NCCL_SOCKET_IFNAME=ens8  # network interface NCCL should use (adjust to your machine)
export NCCL_IB_DISABLE=1        # disable InfiniBand and fall back to the TCP/socket transport
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=all_gather --shape="(1024,4096)" --iters=5
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=scatter --shape="(1024,4096)" --iters=5
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=gather --shape="(1024,4096)" --iters=5
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=reduce --shape="(1024,4096)" --iters=5
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=broadcast --shape="(1024,4096)" --iters=5
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=p2p --shape="(1024,4096)" --iters=5
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=all_reduce --shape="(1024,4096)" --iters=5
torchrun -m --nnodes=1 --nproc_per_node=4 ccl_benchmark --op=reduce_scatter --shape="(1024,4096)" --iters=5
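
Each run prints a mean latency in microseconds. If you also want a bandwidth figure, the printed time can be converted as sketched below; the helper name bandwidth_gbps and the 850 us sample value are made up for illustration, and the bus-bandwidth correction factors follow the convention used by nccl-tests.

def bandwidth_gbps(shape, dtype_bytes, mean_us, op, world_size):
    # Convert the mean latency printed by ccl_benchmark into GB/s.
    n_bytes = shape[0] * shape[1] * dtype_bytes       # size of the full tensor
    algo_bw = n_bytes / (mean_us * 1e-6) / 1e9        # bytes moved / elapsed time
    factor = {                                        # bus-bandwidth factors (nccl-tests convention)
        "all_reduce":     2 * (world_size - 1) / world_size,
        "all_gather":     (world_size - 1) / world_size,
        "reduce_scatter": (world_size - 1) / world_size,
    }.get(op, 1.0)                                    # broadcast / reduce / p2p: 1
    return algo_bw, algo_bw * factor

# Example: a (1024, 4096) int64 all_reduce that averaged 850 us on 4 GPUs (made-up number)
print(bandwidth_gbps((1024, 4096), 8, 850.0, "all_reduce", 4))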