Вы можете моделировать 2D-массив в 1D, сохраняя данные построчно. Так двумерный массив [a, b] [c, d] становится [a, b, c, d]. Для простоты вы можете написать класс-оболочку, обеспечивающий такую функциональность.
Вот демонстрационная версия (не на 100% защищённая от ошибок, но работающая) этой идеи:
#pragma once
#include <cstdlib>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
typedef int TYPE;
// Minimal owning wrapper that models a 2D row-major matrix on top of a
// flat 1D allocation: element (row, col) lives at data[row*cols + col].
template<class T>
struct Matrix
{
// Allocates uninitialized r*c storage; the caller fills it.
Matrix(int r, int c) : rows(r), cols(c) {
data = new T[r*c];
}
// Deep copy. Without user-defined copy operations the compiler-generated
// ones would share `data` between two objects, and the destructor would
// then run delete[] twice on the same pointer (Rule of Three).
Matrix(const Matrix& other) : rows(other.rows), cols(other.cols) {
data = new T[rows*cols];
for (int i = 0; i < rows*cols; i++) data[i] = other.data[i];
}
Matrix& operator=(const Matrix& other) {
if (this != &other) {
// Allocate and copy before freeing the old buffer, so a throwing
// allocation leaves *this unchanged.
T* fresh = new T[other.rows*other.cols];
for (int i = 0; i < other.rows*other.cols; i++) fresh[i] = other.data[i];
delete[] data;
data = fresh;
rows = other.rows;
cols = other.cols;
}
return *this;
}
// Move operations: steal the buffer and leave the source empty but
// destructible (delete[] nullptr is a no-op).
Matrix(Matrix&& other) : rows(other.rows), cols(other.cols), data(other.data) {
other.data = nullptr;
other.rows = other.cols = 0;
}
Matrix& operator=(Matrix&& other) {
if (this != &other) {
delete[] data;
rows = other.rows;
cols = other.cols;
data = other.data;
other.data = nullptr;
other.rows = other.cols = 0;
}
return *this;
}
~Matrix() {
// As we allocated memory it needs to be freed upon destruction
delete[] data;
data = nullptr;
}
int rows, cols;
T* data;
// Returns a pointer to the start of `row`; a second [] then selects the
// column, so m[r][c] reads element (r, c).
T* operator[](int row) {
return data + (row*cols);
}
};
// Element-wise addition of two row-major matrices: c = a + b.
// Expects a 2D launch where x indexes columns and y indexes rows; any
// thread that falls outside the rows x cols extent exits immediately,
// so an over-sized grid/block configuration is safe.
__global__ void add(TYPE *a, TYPE *b, TYPE *c, int rows, int cols) {
// Global 2D coordinates of this thread's element.
const int y = blockIdx.y * blockDim.y + threadIdx.y; // row
const int x = blockIdx.x * blockDim.x + threadIdx.x; // column
// Guard: skip threads launched beyond the data bounds.
if (y >= rows || x >= cols)
return;
const int flat = y * cols + x;
c[flat] = a[flat] + b[flat];
}
int main() {
// Demo: m3 = m1 + m2 on the GPU via the flat row-major layout.
// Every CUDA runtime call is checked; on failure we print the error
// string and exit, because a sticky error would make all later calls
// fail mysteriously.
auto check = [](cudaError_t err, const char* what) {
if (err != cudaSuccess) {
std::cerr << what << " failed: " << cudaGetErrorString(err) << "\n";
std::exit(EXIT_FAILURE);
}
};
int rows = 5, cols = 5, total = rows * cols;
Matrix<TYPE> m1{ rows,cols }, m2{ rows,cols }, m3{ rows,cols };
// Initialization through the flat 1D view...
for(int i = 0; i < total; i++) {
m1.data[i] = i;
}
// ...or through the 2D operator[] view
for(int r = 0; r < rows; r++)
for(int c = 0; c < cols; c++)
m2[r][c] = r*cols + c + 100;
for(int i = 0; i < total; i++) std::cout << m1.data[i] << ", ";
std::cout << "\n";
for(int r = 0; r < rows; r++) {
for(int c = 0; c < cols; c++)
std::cout << m2[r][c] << ", ";
std::cout << "\n";
}
// CUDA part
TYPE *d_m1, *d_m2, *d_m3;
const size_t bytes = total * sizeof(TYPE);
// Allocation
check(cudaMalloc((void **) &d_m1, bytes), "cudaMalloc d_m1");
check(cudaMalloc((void **) &d_m2, bytes), "cudaMalloc d_m2");
check(cudaMalloc((void **) &d_m3, bytes), "cudaMalloc d_m3");
// Copy m1 and m2 to GPU
check(cudaMemcpy(d_m1, m1.data, bytes, cudaMemcpyHostToDevice), "copy m1 to device");
check(cudaMemcpy(d_m2, m2.data, bytes, cudaMemcpyHostToDevice), "copy m2 to device");
// Oversized on purpose to show row/col guard on add kernel
dim3 grid(5, 5);
dim3 block(5, 5);
add <<< grid, block >>> (d_m1, d_m2, d_m3, rows, cols);
// Kernel launches are asynchronous and return no status; a bad launch
// configuration only surfaces through cudaGetLastError().
check(cudaGetLastError(), "add kernel launch");
// Copy result to m3. This blocking D2H copy also synchronizes with the
// kernel and surfaces any asynchronous execution error.
check(cudaMemcpy(m3.data, d_m3, bytes, cudaMemcpyDeviceToHost), "copy m3 to host");
check(cudaFree(d_m1), "cudaFree d_m1");
check(cudaFree(d_m2), "cudaFree d_m2");
check(cudaFree(d_m3), "cudaFree d_m3");
for(int r = 0; r < rows; r++) {
for(int c = 0; c < cols; c++)
std::cout << m3[r][c] << ", ";
std::cout << "\n";
}
#if defined(_WIN32)
// "pause" is a Windows shell builtin; keep the console window open there only.
system("pause");
#endif
return 0;
}