DCU Heterogeneous Programming: Matrix Multiplication

Contents

1. Overview

2. Implementation

3. Compiling and Running


1. Overview

HIP is an explicit programming model: parallel control statements, including data transfers and kernel launches, must be written out in the program. A kernel is a function that runs on the DCU. The part that runs on the CPU is called the host side (mainly responsible for management and launching), and the part that runs on the DCU is called the device side (which performs the computation). The overall flow is shown below:

[Figure: HIP program flow]

① The host passes the data to be processed in parallel to the DCU via hipMemcpy() (copying data from CPU memory to DCU device memory);

② The host calls the kernel launch function hipLaunchKernelGGL() to start the DCU and begin the computation;

③ The computed results are copied back from the DCU to the CPU via hipMemcpy().

hipMemcpy() is blocking: the statements that follow it run only after the copy has completed. hipLaunchKernelGGL() is non-blocking: the program continues immediately after the launch, but the final hipMemcpy() will not start until the kernel has finished, because HIP's stream mechanism executes operations issued on the same stream in order.
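
To make the flow concrete, here is a minimal sketch of the three steps on the default stream (illustrative only: error checking is omitted, and vecAdd is a hypothetical kernel, not part of the matrix-multiply sample below):

#include "hip/hip_runtime.h"

// Hypothetical kernel used only to illustrate the launch flow.
__global__ void vecAdd(float *c, const float *a, const float *b, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n) c[i] = a[i] + b[i];
}

void run(const float *h_a, const float *h_b, float *h_c, int n)
{
    float *d_a, *d_b, *d_c;
    size_t bytes = n * sizeof(float);
    hipMalloc(reinterpret_cast<void **>(&d_a), bytes);
    hipMalloc(reinterpret_cast<void **>(&d_b), bytes);
    hipMalloc(reinterpret_cast<void **>(&d_c), bytes);

    // Step 1: host -> device, blocking copies.
    hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);

    // Step 2: non-blocking kernel launch on the default (NULL) stream.
    hipLaunchKernelGGL(vecAdd, dim3((n + 255) / 256), dim3(256), 0, 0, d_c, d_a, d_b, n);

    // Step 3: device -> host. This copy is issued on the same stream as the
    // kernel, so it does not start until the kernel has finished.
    hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);

    hipFree(d_a); hipFree(d_b); hipFree(d_c);
}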

2. Implementation

Below is the full implementation of matrix multiplication, MatrixMul.cpp. Note that instead of the blocking hipMemcpy() described above, it uses hipMemcpyAsync() on an explicit stream together with hipStreamSynchronize(); helper_functions.h and helper_hip.h supply the command-line and error-checking helpers used below:

#include <stdio.h>
#include <assert.h>
#include "hip/hip_runtime.h"
#include "helper_functions.h"
#include "helper_hip.h"


// Tiled matrix multiplication: each thread block computes one
// BLOCK_SIZE x BLOCK_SIZE tile of C = A * B, staging tiles of A and B in
// shared memory. wA and wB are the widths (column counts) of A and B.
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;

    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // First and last tile of A processed by this thread block, plus the
    // strides used to step through tiles of A (across a row of blocks)
    // and tiles of B (down a column of blocks).
    int aBegin = wA * BLOCK_SIZE * by;
    int aEnd   = aBegin + wA - 1;
    int aStep  = BLOCK_SIZE;

    int bBegin = BLOCK_SIZE * bx;
    int bStep  = BLOCK_SIZE * wB;

    // Accumulator for the single element of C computed by this thread.
    float Csub = 0;

    for(int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
    {
        // Stage one tile of A and one tile of B in shared memory;
        // each thread loads one element of each tile.
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];

        // Wait until both tiles are fully loaded before computing.
        __syncthreads();

#pragma unroll
        for(int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }
    
        // Wait until all threads are done with the tiles before they are
        // overwritten in the next iteration.
        __syncthreads();
    }

    // Each thread writes one element of the block's tile of C.
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}

void ConstantInit(float *data, int size, float val)
{
    for(int i = 0; i < size; ++i)
    {
        data[i] = val;
    }
}


int MatrixMultiply(int argc, char **argv, int block_size, const dim3 &dimsA, const dim3 &dimsB)
{
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
    hipStream_t stream;

    const float valB = 0.01f;
    ConstantInit(h_A, size_A, 1.0f);
    ConstantInit(h_B, size_B, valB);

    float *d_A, *d_B, *d_C;

    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));

    if(h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }

    checkHIPErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
    checkHIPErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
    checkHIPErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));

    hipEvent_t start, stop;
    checkHIPErrors(hipEventCreate(&start));
    checkHIPErrors(hipEventCreate(&stop));

    checkHIPErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));

    // Copy the input matrices to the device asynchronously on the stream.
    checkHIPErrors(hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
    checkHIPErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));

    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x/threads.x, dimsA.y/threads.y);

    printf("Computing result using CUDA Kernel...\n");

    // Warm-up launch so the timed runs below exclude one-time startup costs.
    if(block_size == 16)
    {
        hipLaunchKernelGGL(HIP_KERNEL_NAME(MatrixMulCUDA<16>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    else
    {
        hipLaunchKernelGGL(HIP_KERNEL_NAME(MatrixMulCUDA<32>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
    }

    printf("Done\n");
    checkHIPErrors(hipStreamSynchronize(stream));

    checkHIPErrors(hipEventRecord(start, stream));

    // Number of timed kernel launches.
    int nIter = 300;

    for(int j = 0; j < nIter; j++)
    {
        if(block_size == 16)
        {
            hipLaunchKernelGGL(HIP_KERNEL_NAME(MatrixMulCUDA<16>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        else
        {
            hipLaunchKernelGGL(HIP_KERNEL_NAME(MatrixMulCUDA<32>), dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
    }

    checkHIPErrors(hipEventRecord(stop, stream));

    checkHIPErrors(hipEventSynchronize(stop));

    float msecTotal = 0.0f;
    checkHIPErrors(hipEventElapsedTime(&msecTotal, start, stop));

    float msecPerMatrixMul = msecTotal/nIter;
    // Each of the dimsA.y * dimsB.x output elements takes dimsA.x
    // multiply-add pairs, i.e. 2 * dimsA.x floating-point operations.
    double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) * static_cast<double>(dimsA.y) * static_cast<double>(dimsB.x);
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul/1000.0f);

    printf("Performance = %.2f GFlop/s, Time = %.3f msec, Size = %.0f Ops, WorkgroupSize = %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y);

    checkHIPErrors(hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
    checkHIPErrors(hipStreamSynchronize(stream));

    printf("Checking computed result for correctness:");
    bool correct = true;

    // A is filled with 1.0 and B with valB, so every element of C should
    // equal dimsA.x * valB (3.2 with the default sizes); verify with a
    // relative-error test.
    double eps = 1.e-6;

    for(int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err / abs_val / dot_length;

        if(rel_err > eps)
        {
            printf("Error! Matrix[%05d] = %.8f, ref = %.8f error term is > %E\n", i, h_C[i], dimsA.x * valB, eps);
            correct = false;
        }
    }

    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");

    free(h_A);
    free(h_B);
    free(h_C);
    checkHIPErrors(hipFree(d_A));
    checkHIPErrors(hipFree(d_B));
    checkHIPErrors(hipFree(d_C));
    checkHIPErrors(hipEventDestroy(start));
    checkHIPErrors(hipEventDestroy(stop));
    printf("\nNOTE: The CUDA Samples are not meant for performance measurement. Results may vary when GPU Boost is enabled.\n");

    if(correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}

int main(int argc, char *argv[])
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    if(checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf("      -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf("      -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf("  Note: Inner matrix dimensions of A & B matrices must be equal.\n");

        exit(EXIT_SUCCESS);
    }

    int dev = findHIPDevice(argc, (const char **)argv);
    int block_size = 32;

    dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
    dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);

    if(checkCmdLineFlag(argc, (const char **)argv, "wA"))
    {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }
    
    if(checkCmdLineFlag(argc, (const char **)argv, "hA"))
    {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }

    if(checkCmdLineFlag(argc, (const char **)argv, "wB"))
    {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }

    if(checkCmdLineFlag(argc, (const char **)argv, "hB"))
    {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }

    if(dimsA.x != dimsB.y)
    {
        printf("Error: inner matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }

    printf("Matrix A(%d, %d), Matrix B(%d, %d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);

    int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);

    exit(matrix_result);
}

3. Compiling and Running

HIP programs are compiled with hipcc.
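
For example, a typical build and run might look like the following (a sketch: the -I path for helper_functions.h and helper_hip.h is an assumption, so adjust it to your environment; the matrix sizes match the program's defaults):

hipcc MatrixMul.cpp -I./Common -o MatrixMul
./MatrixMul -wA=320 -hA=320 -wB=640 -hB=320

The program prints the measured GFLOP/s, the time per matrix multiplication, and a PASS/FAIL correctness result.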

