How do I use the WMMA functions?
0 votes
/ 16 October 2018

I ran the example from the CUDA documentation, but I got unexpected results. So how do I use the wmma functions? Is my wmma::load_matrix_sync wrong? Or is there something else we need to watch out for? ...

WMMA_M = WMMA_N = WMMA_K = 16

__global__ void wmma_kernel(half *a, half *b, float *c, int matrix_size)
{
  //Declare the fragments
  wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
  wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> b_frag;
  wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K,float> acc_frag;

  //Load the matrices into the fragments
  wmma::load_matrix_sync(a_frag, a, WMMA_M);
  wmma::load_matrix_sync(b_frag, b, WMMA_K);

  //perform mma
  wmma::fill_fragment(acc_frag, 0.0f);

  for(int i=0; i<1e4; i++)
    {
      wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
    }

  //store the result
  wmma::store_matrix_sync(c, acc_frag, WMMA_M, wmma::mem_row_major);
}

... I set the value of the elements (d_a[i], d_b[i]) to 1.0f and c[i] = 0.0f. After running wmma_kernel, the value of c[i] is still 0.0f, and the elapsedTime value is also 0.0f. (With all-ones inputs, each mma_sync should add 16.0 to every accumulator element, so after the 1e4 iterations I would expect c[i] = 160000, not 0.)

matrix_size = 16 x 16

  //create the events
  cudaEvent_t start, stop;
  CUDA_CHECK_RETURN(cudaEventCreate(&start));
  CUDA_CHECK_RETURN(cudaEventCreate(&stop));

  //perform the wmma_kernel
  CUDA_CHECK_RETURN(cudaEventRecord(start));
  wmma_kernel<<<1,256>>>(d_a, d_b, d_c, matrix_size);

  CUDA_CHECK_RETURN(cudaEventRecord(stop));
  CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
  //calculate the elapsed time
  float elapsedTime;
  cudaEventElapsedTime(&elapsedTime, start, stop);

  printf("Elapsed Time : %f\n",elapsedTime);

1 Answer

0 votes
/ 20 October 2018

It is not possible to directly assign a value to a half variable in host code.
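
On CUDA 9.2 one way around this is the explicit conversion intrinsic __float2half() from cuda_fp16.h. A minimal sketch of the host-side fill loop, matching the array names used in the example below (this assumes your toolkit's cuda_fp16.h declares __float2half as host-callable, which recent versions do):

#include <cuda_fp16.h>

// Fill the host arrays with an explicit float -> half conversion
for (int i = 0; i < 16*16; i++) {
  h_a[i] = __float2half(1.0f);
  h_b[i] = __float2half(1.0f);
}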

I would suggest switching to CUDA 10, which made working with the half datatype much easier.

However, the following example should work the same whether you are using CUDA 9.2 or CUDA 10:

$ cat t304.cu
#include <mma.h>
#include <iostream>

using namespace nvcuda;

__global__ void wmma_ker(half *a, half *b, float *c) {
   // Declare the fragments
   wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::col_major> a_frag;
   wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag;
   wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;

   // Initialize the output to zero
   wmma::fill_fragment(c_frag, 0.0f);

   // Load the inputs
   wmma::load_matrix_sync(a_frag, a, 16);
   wmma::load_matrix_sync(b_frag, b, 16);

   // Perform the matrix multiplication
   wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);

   // Store the output
   wmma::store_matrix_sync(c, c_frag, 16, wmma::mem_row_major);
}

int main(){

  half *d_a, *h_a, *d_b, *h_b;
  float *d_c, *h_c;
  h_c = new float[16*16];
  h_b = new half[16*16];
  h_a = new half[16*16];
  cudaMalloc(&d_a, 16*16*sizeof(half));
  cudaMalloc(&d_b, 16*16*sizeof(half));
  cudaMalloc(&d_c, 16*16*sizeof(float));
  for (int i = 0; i < 16*16; i++) {
    h_a[i] = 1.0f;
    h_b[i] = 1.0f;}
  cudaMemcpy(d_a, h_a, 16*16*sizeof(half), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, 16*16*sizeof(half), cudaMemcpyHostToDevice);
  wmma_ker<<<1,32>>>(d_a, d_b, d_c);
  cudaMemcpy(h_c, d_c, 16*16*sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < 16*16; i++) std::cout << h_c[i] << ",";
  std::cout << std::endl;
}
$ nvcc -arch=sm_70 -o t304 t304.cu
$ cuda-memcheck ./t304
========= CUDA-MEMCHECK
16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
========= ERROR SUMMARY: 0 errors
$
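
A couple of points that are easy to miss: the wmma operations require Tensor Cores, i.e. a GPU of compute capability 7.0 or higher, hence the -arch=sm_70 compile flag, and they are warp-level operations, which is why the kernel is launched with a single warp (<<<1,32>>>); with the <<<1,256>>> launch from the question, each of the eight warps would redundantly compute and store the same 16x16 tile. If the binary is built for the wrong architecture, the kernel never launches at all, which would explain both the unchanged c[i] and the near-zero elapsed time in the question. A minimal sketch of launch error checking, using plain runtime calls rather than the question's CUDA_CHECK_RETURN macro:

wmma_ker<<<1,32>>>(d_a, d_b, d_c);
cudaError_t err = cudaGetLastError();   // catches launch/configuration errors
if (err == cudaSuccess)
  err = cudaDeviceSynchronize();        // catches errors during kernel execution
if (err != cudaSuccess)
  std::cout << cudaGetErrorString(err) << std::endl;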

To time the kernel you could use cudaEvent-based timing, but it seems easier to me to just use nvprof:

$ nvprof ./t304
==28135== NVPROF is profiling process 28135, command: ./t304
16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
==28135== Profiling application: ./t304
==28135== Profiling result:
            Type  Time(%)      Time     Calls       Avg       Min       Max  Name
 GPU activities:   42.97%  3.2320us         2  1.6160us  1.4080us  1.8240us  [CUDA memcpy HtoD]
                   28.52%  2.1450us         1  2.1450us  2.1450us  2.1450us  [CUDA memcpy DtoH]
                   28.51%  2.1440us         1  2.1440us  2.1440us  2.1440us  wmma_ker(__half*, __half*, float*)
      API calls:   98.42%  498.63ms         3  166.21ms  5.2170us  498.61ms  cudaMalloc
                    1.06%  5.3834ms       384  14.019us     347ns  568.79us  cuDeviceGetAttribute
                    0.38%  1.9473ms         4  486.83us  250.95us  1.1810ms  cuDeviceTotalMem
                    0.10%  493.31us         4  123.33us  109.62us  140.63us  cuDeviceGetName
                    0.01%  68.566us         1  68.566us  68.566us  68.566us  cudaLaunchKernel
                    0.01%  67.104us         3  22.368us  9.6850us  30.563us  cudaMemcpy
                    0.00%  22.628us         4  5.6570us  3.1910us  9.2200us  cuDeviceGetPCIBusId
                    0.00%  8.6020us         8  1.0750us     540ns  1.6570us  cuDeviceGet
                    0.00%  5.8370us         3  1.9450us     443ns  3.7760us  cuDeviceGetCount
                    0.00%  2.7590us         4     689ns     600ns     843ns  cuDeviceGetUuid
...
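
If you do prefer cudaEvent-based timing, the pattern from the question works once the kernel actually launches. A minimal sketch as it would sit inside main() of the example above (error checking omitted for brevity):

cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
wmma_ker<<<1,32>>>(d_a, d_b, d_c);
cudaEventRecord(stop);
cudaEventSynchronize(stop);              // block until the stop event has occurred

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);  // resolution is on the order of 0.5 us
std::cout << "kernel time: " << ms << " ms" << std::endl;

cudaEventDestroy(start);
cudaEventDestroy(stop);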