(5) Grid and block in CUDA


Overview

In CUDA, host and device are two central concepts: host refers to the CPU and its memory, while device refers to the GPU and its memory.

A typical CUDA program executes the following steps (a minimal sketch is shown after the list):

  1. Allocate host memory and initialize the data;
  2. Allocate device memory and copy the data from the host to the device;
  3. Launch CUDA kernels to perform the computation on the device;
  4. Copy the results from the device back to the host;
  5. Free the memory allocated on both the device and the host.
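
A minimal sketch of these five steps, using a hypothetical vector-add kernel and sizes chosen only for illustration:

cpp
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Hypothetical kernel: element-wise addition of two arrays
__global__ void vec_add(const float* a, const float* b, float* c, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main(){
    const int n = 1024;
    const size_t bytes = n * sizeof(float);

    // 1. Allocate host memory and initialize the data
    float *h_a = (float*)malloc(bytes), *h_b = (float*)malloc(bytes), *h_c = (float*)malloc(bytes);
    for (int i = 0; i < n; ++i) { h_a[i] = 1.0f; h_b[i] = 2.0f; }

    // 2. Allocate device memory and copy the data from host to device
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, bytes); cudaMalloc((void**)&d_b, bytes); cudaMalloc((void**)&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // 3. Launch the kernel on the device (256 threads per block, rounded-up grid)
    vec_add<<<(n + 255) / 256, 256>>>(d_a, d_b, d_c, n);

    // 4. Copy the result from device back to host (this copy waits for the kernel)
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    printf("c[0] = %f\n", h_c[0]);

    // 5. Free device and host memory
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(h_a); free(h_b); free(h_c);
    return 0;
}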

Overall architecture

In general:

One kernel launch corresponds to one grid.

A grid can contain multiple blocks, arranged in one to three dimensions.

A block can contain multiple threads, arranged in one to three dimensions.

The kernel function we write runs on every thread of every block.

For heuristics on choosing grid and block sizes, see: https://cuda-programming.blogspot.com/2013/01/thread-and-block-heuristics-in-cuda.html
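
The launch configuration is written with dim3, and any component you do not specify defaults to 1. The sketch below (whoami is a name made up for this post) launches a 2D grid of 2D blocks just to show the multi-dimensional case:

cpp
#include <cuda_runtime.h>
#include <stdio.h>

// Hypothetical kernel: each thread reports its 2D block and thread coordinates
__global__ void whoami(){
    printf("block (%d, %d), thread (%d, %d)\n",
           blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x);
}

int main(){
    dim3 block(4, 2);   // 4 x 2 = 8 threads per block; z defaults to 1
    dim3 grid(2, 2);    // 2 x 2 = 4 blocks in the grid; z defaults to 1
    whoami<<<grid, block>>>();
    cudaDeviceSynchronize();   // wait for the device-side printf to finish
    return 0;
}

The examples in the rest of this post use a simpler 1D launch.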

cpp
#include <cuda_runtime.h>
#include <stdio.h>

// Kernel: print each thread's block and thread indices (in z, y, x order)
__global__ void print_idx(){
    printf("block idx: (%3d, %3d, %3d), thread idx: (%3d, %3d, %3d)\n",
         blockIdx.z, blockIdx.y, blockIdx.x,
         threadIdx.z, threadIdx.y, threadIdx.x);
}

void demo_print(){
    int inputSize = 8;
    int blockDim = 4;  // number of threads per block (the block "dimension")
    int gridDim = inputSize / blockDim; // 8 / 4 = 2 blocks are needed, so the grid dimension is 2

    dim3 block(blockDim);
    dim3 grid(gridDim);

    print_idx<<<grid, block>>>();
    // cudaDeviceSynchronize() forces the host to wait until the kernel
    // has finished before executing the next statement.
    cudaDeviceSynchronize();
}

int main() {
    demo_print();
    return 0;
}
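
Note that demo_print assumes inputSize is exactly divisible by blockDim. When it is not, a common pattern is to round the grid size up and guard against out-of-range threads inside the kernel; a sketch with hypothetical names:

cpp
#include <cuda_runtime.h>

// Hypothetical kernel: the guard lets the extra threads in the last block exit early
__global__ void work(int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;   // threads past the end of the data do nothing
    // ... process element i ...
}

int main(){
    int inputSize = 10;
    int blockDim  = 4;
    int gridDim   = (inputSize + blockDim - 1) / blockDim;  // ceiling division: 10 elements -> 3 blocks
    work<<<gridDim, blockDim>>>(inputSize);
    cudaDeviceSynchronize();
    return 0;
}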

Printing the grid and block dimensions

cpp
__global__ void print_dim(){
    printf("grid dimension: (%3d, %3d, %3d), block dimension: (%3d, %3d, %3d)\n",
         gridDim.z, gridDim.y, gridDim.x,
         blockDim.z, blockDim.y, blockDim.x);
}
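
With the launch configuration used in demo_print (grid(2) and block(4)), every thread prints the same line: grid dimension (1, 1, 2) and block dimension (1, 1, 4), since the printf lists the z, y, x components and any dim3 component that is not specified defaults to 1.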

Computing each thread's index within its block

cpp
__global__ void print_thread_idx_per_block(){
    int index = threadIdx.z * blockDim.x * blockDim.y + \
              threadIdx.y * blockDim.x + \
              threadIdx.x;

    printf("block idx: (%3d, %3d, %3d), thread idx: %3d\n",
         blockIdx.z, blockIdx.y, blockIdx.x,
         index);
}
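
As a quick check of the formula with a hypothetical 3D block (the launch in demo_print is only 1D): for blockDim = (4, 2, 2) and threadIdx = (x=2, y=1, z=1), the index is 1 * 4 * 2 + 1 * 4 + 2 = 14; the block holds 4 * 2 * 2 = 16 threads with indices 0 through 15.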

Computing each thread's index within the grid

cpp
__global__ void print_thread_idx_per_grid(){
    int block_Size  = blockDim.z * blockDim.y * blockDim.x;

    int block_Index = blockIdx.z * gridDim.x * gridDim.y + \
               blockIdx.y * gridDim.x + \
               blockIdx.x;

    int thread_Index = threadIdx.z * blockDim.x * blockDim.y + \
               threadIdx.y * blockDim.x + \
               threadIdx.x;

    int thread_index_in_grid  = block_Index * block_Size + thread_Index;

    printf("block idx: %3d, thread idx in block: %3d, thread index in grid: %3d\n", 
         block_Index, thread_Index, thread_index_in_grid);
}
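
For the 1D launch used in demo_print, the y and z terms above are all zero, so the general formula reduces to the familiar one-line idiom (the kernel name here is made up):

cpp
__global__ void print_thread_idx_per_grid_1d(){
    // 1D special case of the formula above: only the x components matter
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    printf("thread index in grid: %3d\n", idx);
}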

Complete code

cpp
#include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>

// Kernel: print each thread's block and thread indices (in z, y, x order)
__global__ void print_idx(){
    printf("block idx: (%3d, %3d, %3d), thread idx: (%3d, %3d, %3d)\n",
         blockIdx.z, blockIdx.y, blockIdx.x,
         threadIdx.z, threadIdx.y, threadIdx.x);
}
// Kernel: print the grid and block dimensions (in z, y, x order)
__global__ void print_dim(){
    printf("grid dimension: (%3d, %3d, %3d), block dimension: (%3d, %3d, %3d)\n",
         gridDim.z, gridDim.y, gridDim.x,
         blockDim.z, blockDim.y, blockDim.x);
}
// Kernel: compute each thread's linear index within its block (linearized in z, y, x order):
__global__ void print_thread_idx_per_block(){
    int index = threadIdx.z * blockDim.x * blockDim.y + \
              threadIdx.y * blockDim.x + \
              threadIdx.x;

    printf("block idx: (%3d, %3d, %3d), thread idx: %3d\n",
         blockIdx.z, blockIdx.y, blockIdx.x,
         index);
}

// Kernel: compute each thread's linear index within the grid (linearized in z, y, x order):
__global__ void print_thread_idx_per_grid(){
    int block_Size  = blockDim.z * blockDim.y * blockDim.x;

    int block_Index = blockIdx.z * gridDim.x * gridDim.y + \
               blockIdx.y * gridDim.x + \
               blockIdx.x;

    int thread_Index = threadIdx.z * blockDim.x * blockDim.y + \
               threadIdx.y * blockDim.x + \
               threadIdx.x;

    int thread_index_in_grid  = block_Index * block_Size + thread_Index;

    printf("block idx: %3d, thread idx in block: %3d, thread index in grid: %3d\n", 
         block_Index, thread_Index, thread_index_in_grid);
}



void demo_print(){
    int inputSize = 8;
    int blockDim = 4;  // number of threads per block (the block "dimension")
    int gridDim = inputSize / blockDim; // 8 / 4 = 2 blocks are needed, so the grid dimension is 2

    dim3 block(blockDim);
    dim3 grid(gridDim);

    print_idx<<<grid, block>>>();
    // cudaDeviceSynchronize() forces the host to wait until the kernel
    // has finished before executing the next statement.
    cudaDeviceSynchronize();
    std::cout << "---------------- divider ----------------" << std::endl;
    print_dim<<<grid, block>>>();
    cudaDeviceSynchronize();
    std::cout << "---------------- divider ----------------" << std::endl;
    print_thread_idx_per_block<<<grid, block>>>();
    cudaDeviceSynchronize();
    std::cout << "---------------- divider ----------------" << std::endl;
    print_thread_idx_per_grid<<<grid, block>>>();
    cudaDeviceSynchronize();

}

int main() {
    demo_print();
    return 0;
}

The accompanying CMakeLists.txt:

cmake
cmake_minimum_required(VERSION 3.10)

project(test CUDA)
set(CMAKE_CUDA_STANDARD 20)

add_executable(test1 print_index_demo1.cu)