How do you use __constant__ in a CUDA graph?

I'm trying to use a CUDA graph to join a bunch of kernels which use __constant__ parameters to set values that won't change. A small test case:
#include <iostream>
#include <vector>

template<typename T>
__constant__ T scalar[1];

__constant__ size_t size_a[1];

template<typename T>
__global__ void set_buffer(T *buffer) {
    const size_t index = blockIdx.x*blockDim.x + threadIdx.x;
    buffer[min(index, size_a[0])] = scalar<T>[0];
}

int main(int argc, const char * argv[]) {
    cudaGraph_t graph;
    cudaGraphCreate(&graph, 0);
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    const double src = 10.0;
    cudaGraphNode_t node_a;
    cudaGraphAddMemcpyNodeToSymbol(&node_a, graph, NULL, 0,
                                   scalar<double>, &src, sizeof(double),
                                   0, cudaMemcpyHostToDevice);

    const size_t length = 100;
    cudaGraphNode_t node_b;
    cudaGraphAddMemcpyNodeToSymbol(&node_b, graph, NULL, 0,
                                   size_a, &length, sizeof(size_t),
                                   0, cudaMemcpyHostToDevice);

    double *result;
    cudaMalloc(&result, length*sizeof(double));

    cudaKernelNodeParams params;
    params.func = (void *) set_buffer<double>;
    params.gridDim = dim3(1, 1, 1);
    params.blockDim = dim3(512, 1, 1);
    params.sharedMemBytes = 0;
    params.kernelParams = {reinterpret_cast<void **> (&result)};
    params.extra = NULL;

    cudaGraphNode_t node_c;
    std::vector<cudaGraphNode_t> deps = {node_a, node_b};
    cudaGraphAddKernelNode(&node_c, graph, deps.data(), deps.size(),
                           &params);

    cudaGraphExec_t exec;
    cudaGraphInstantiate(&exec, graph, NULL, NULL, 0);
    cudaGraphLaunch(exec, stream);
    cudaStreamSynchronize(stream);

    std::vector<double> host_result(length);
    cudaMemcpy(host_result.data(),
               result, length*sizeof(double),
               cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < length; i++) {
        std::cout << i << " " << host_result[i] << std::endl;
    }
}
When I run this, it doesn't look like cudaGraphAddMemcpyNodeToSymbol is doing anything, because it prints:
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
9 0
10 0
...
90 0
91 0
92 0
93 0
94 0
95 0
96 0
97 0
98 0
99 0
Why isn't the kernel changing values in my buffer?

When I run your code I get a seg fault. Whether you get a seg fault or not isn't really the issue.
The problem is here:
params.kernelParams = {reinterpret_cast<void **> (&result)};
That is not correct. A correct method to do it is as follows:
void *kernelargs[1] = {(void *)&result};
params.kernelParams = kernelargs;
You have an off-by-one error also:
buffer[min(index, size_a[0])] = scalar<T>[0];
^^^^^^^^^^^^^^^^^^^^^
The value you are putting into size_a[0] is 100, so any thread whose index is 100 or greater will attempt to write to buffer at element index 100, which is one past the end of your 100-element allocation.
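Putting both fixes together, a minimal sketch of the corrected pieces (keeping your variable names) replaces the clamped index with a guard so that out-of-range threads simply do nothing:
template<typename T>
__global__ void set_buffer(T *buffer) {
    const size_t index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < size_a[0]) {   // guard: threads past the end write nothing
        buffer[index] = scalar<T>[0];
    }
}
and on the host side:
void *kernelargs[1] = {(void *)&result};  // one pointer per kernel argument
params.kernelParams = kernelargs;         // kernelParams points at that array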

Related

CUBLAS Sgemm confusing results

For two matrices X and Q of size 4x3 and 2x3
which in memory look like
x = [0 1 2 3 4 5 6 7 8 9 10 11]
q = [3 4 5 6 7 8]
I tried to use the cuBLAS multiplication routine cublasSgemm, but I couldn't manage to get the expected results.
Since they are stored in row-major order, they should be interpreted as 3x4 and 3x2, so it seemed to me that
cublasSgemm(cublas_handle,
            CUBLAS_OP_T, CUBLAS_OP_N,
            q_rows_num, x_rows_num, dim,
            &alpha, // 1
            q_device, q_rows_num,
            x, x_rows_num,
            &beta, // 0
            x_q_multiplication, q_rows_num);
where
dim = 3
x_rows_num = 4
q_rows_num = 2
would work, but in that case I got the error
** On entry to SGEMM parameter number 8 had an illegal value
I also tried shuffling parameters a bit but I couldn't find any setup that would work.
So is it possible to multiply them without changing to column-major order?
EDIT:
So I got the expected results with the changes made in this working example:
#include <cublas_v2.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>

int main()
{
    int x_rows_num = 4;
    int q_rows_num = 2;
    int dim = 3;
    int N = x_rows_num*dim;
    int M = q_rows_num*dim;
    float *x, *q, *x_q_multiplication;
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&q, M*sizeof(float));
    cudaMallocManaged(&x_q_multiplication, q_rows_num*x_rows_num*sizeof(float));
    for (int i = 0; i < N; i++) x[i] = i*1.0f;
    for (int i = 0; i < M; i++) q[i] = (i + 3)*1.0f;
    float *q_device;
    cudaMallocManaged(&q_device, M*sizeof(float));
    cudaMemcpy(q_device, q, M*sizeof(float), cudaMemcpyHostToDevice);
    cublasHandle_t handle;
    cublasCreate(&handle);
    float alpha = 1.f;
    float beta = 0.f;
    cublasSgemm(handle,
                CUBLAS_OP_T, CUBLAS_OP_N,
                x_rows_num, q_rows_num, dim,
                &alpha,
                x, dim,
                q, dim,
                &beta,
                x_q_multiplication, x_rows_num);
    cudaDeviceSynchronize();
    for (int i = 0; i < q_rows_num*x_rows_num; i++) std::cout << x_q_multiplication[i] << " ";
    cudaFree(x);
    cudaFree(q);
    cudaFree(x_q_multiplication);
    return 0;
}
However, I'm still not sure why dim became the leading dimension.
Your original CUBLAS call:
cublasSgemm(cublas_handle,
            CUBLAS_OP_T, CUBLAS_OP_N,
            q_rows_num, x_rows_num, dim,
            &alpha, // 1
            q_device, q_rows_num,
            x, x_rows_num,
            &beta, // 0
            x_q_multiplication, q_rows_num);
was close to correct. Your interpretation of what the leading dimensions should be was correct. What you got wrong was the Op specifiers. If both matrices are row-major ordered and the first array needs to be read in its (row-major) transposed order, then the operation should be:
#include <cublas_v2.h>
#include <cstring>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>

int main()
{
    int x_rows_num = 4;
    int q_rows_num = 2;
    int dim = 3;
    int N = x_rows_num*dim;
    int M = q_rows_num*dim;
    float x0[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
    float q0[6] = {3, 4, 5, 6, 7, 8};
    float *x, *q, *x_q_multiplication;
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&q, M*sizeof(float));
    cudaMallocManaged(&x_q_multiplication, q_rows_num*x_rows_num*sizeof(float));
    std::memcpy(x, x0, N*sizeof(float));
    std::memcpy(q, q0, M*sizeof(float));
    float *q_device;
    cudaMallocManaged(&q_device, M*sizeof(float));
    cudaMemcpy(q_device, q, M*sizeof(float), cudaMemcpyHostToDevice);
    cublasHandle_t handle;
    cublasCreate(&handle);
    float alpha = 1.f;
    float beta = 0.f;
    cublasSgemm(handle,
                CUBLAS_OP_N, CUBLAS_OP_T,
                q_rows_num, x_rows_num, dim,
                &alpha, // 1
                q_device, q_rows_num,
                x, x_rows_num,
                &beta, // 0
                x_q_multiplication, q_rows_num);
    cudaDeviceSynchronize();
    for (int i = 0; i < q_rows_num*x_rows_num; i++) std::cout << x_q_multiplication[i] << " ";
    std::cout << std::endl;
    cudaFree(x);
    cudaFree(q);
    cudaFree(x_q_multiplication);
    return 0;
}
which does this for me:
$ nvcc -arch=sm_52 cublas_trans.cu -o cublas_trans -lcublas
$ ./cublas_trans
76 88 91 106 106 124 121 142
and which I believe is the correct answer.
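As a quick sanity check on the first printed element (my arithmetic, not part of the original answer): with CUBLAS_OP_N and a leading dimension of 2, CUBLAS reads the q storage {3, 4, 5, 6, 7, 8} as a column-major 2x3 matrix whose first row is (3, 5, 7); with CUBLAS_OP_T and a leading dimension of 4, the x storage is read as a column-major 4x3 matrix and transposed, so the first column of the transposed operand is (0, 4, 8). The dot product 3*0 + 5*4 + 7*8 = 76 matches the first value printed above.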
Incidentally, Robert Crovella's now-deleted comment, which you say you took offense to, was 100% correct. I suspect he read your original CUBLAS call as I did, interpreted the arguments, and concluded, as I did and as CUBLAS itself did, that you were trying to multiply a 3x4 matrix and a 3x2 matrix. In the SGEMM argument list, parameter 8 is lda, and with CUBLAS_OP_T it must be at least k (here 3); you passed q_rows_num = 2, which is why the invalid value error was raised.

Binary numbers in C program

I'm trying to write a program that prompts the user to enter a positive integer N and prints the set of all binary strings of length N. For example, if the user enters 3, the program prints:
0 0 0
0 0 1
0 1 0
0 1 1
1 0 0
1 0 1
1 1 0
1 1 1
I have been trying this for a while but have been unable to do it. If someone can help me with this I would appreciate it! Thanks
This is a typical backtracking problem: at each position you try 0 and recurse, then try 1 and recurse, so the recursion tree has 2^n leaves, one per string. I made example code in C that you can use as a reference:
#include <stdio.h>
#include <stdlib.h>

/* Print every binary string of length n by filling A[] position by position. */
void getAllBi(int depth, int A[], int n) {
    if (depth >= n) {       /* A complete string: print it. */
        int i;
        for (i = 0; i < n; ++i) {
            printf("%d ", A[i]);
        }
        printf("\n");
        return;
    }
    A[depth] = 0;           /* Try 0 at this position... */
    getAllBi(depth+1, A, n);
    A[depth] = 1;           /* ...then try 1. */
    getAllBi(depth+1, A, n);
}

int main() {
    int n;
    scanf("%d", &n);
    int *A = (int*)malloc(n * sizeof(int));
    getAllBi(0, A, n);
    free(A);
    return 0;
}
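Compiled and run with an input of 3 (the file name is hypothetical), the 0 branch is always explored before the 1 branch, so the strings appear in exactly the lexicographic order the question asks for:
$ gcc binary_strings.c -o binary_strings
$ echo 3 | ./binary_strings
0 0 0
0 0 1
0 1 0
0 1 1
1 0 0
1 0 1
1 1 0
1 1 1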

how to set values in constant structure in cuda [duplicate]

I am trying to use constant memory in the code, with the constant memory assigned a value from the kernel rather than via cudaMemcpyToSymbol.
#include <cstdio>
#include <iostream>
using namespace std;

#define N 10

//__constant__ int constBuf_d[N];
__constant__ int *constBuf;

__global__ void foo( int *results )
{
    int tdx = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tdx;
    if( idx < N )
    {
        constBuf[idx] = 1;
        results[idx] = constBuf[idx];
    }
}

// main routine that executes on the host
int main(int argc, char* argv[])
{
    int *results_h = new int[N];
    int *results_d;
    cudaMalloc((void **)&results_d, N*sizeof(int));
    foo <<< 1, 10 >>> ( results_d );
    cudaMemcpy(results_h, results_d, N*sizeof(int), cudaMemcpyDeviceToHost);
    for( int i = 0; i < N; ++i )
        printf("%i ", results_h[i] );
    delete[] results_h;
}
output shows
6231808 6226116 0 0 0 0 0 0 0 0
I want the program to print the value assigned to constant memory through the kernel in the code.
Constant memory is, as the name implies, constant/read-only with respect to device code. What you are trying to do is illegal and can't be made to work.
To set values in constant memory, you currently have two choices:
set the value from host code via the cudaMemcpyToSymbol API call (or its equivalents); a sketch of this follows the example below
use static initialisation at compile time
In the latter case something like this would work:
__constant__ int constBuf[N] = { 16, 2, 77, 40, 12, 3, 5, 3, 6, 6 };

__global__ void foo( int *results )
{
    int tdx = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tdx;
    if( tdx < N )
    {
        results[idx] = constBuf[tdx]; // Note changes here!
    }
}
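In the former case, a minimal sketch would be the following (reusing N, foo and results_d from above; host_values is a name made up for illustration):
__constant__ int constBuf[N];

int host_values[N] = { 16, 2, 77, 40, 12, 3, 5, 3, 6, 6 };
// Copy N ints from the host array into the __constant__ symbol before the launch.
cudaMemcpyToSymbol(constBuf, host_values, N * sizeof(int));
foo <<< 1, 10 >>> ( results_d );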

The different addressing modes of CUDA textures

I am using a CUDA texture in border addressing mode (cudaAddressModeBorder). I am reading from the texture using tex2D<float>(). When the texture coordinates fall outside the texture, tex2D<float>() returns 0.
How can I change this returned border value from 0 to something else? I could check the texture coordinate manually and set the border value myself. I was wondering if there was CUDA API where I can set such a border value.
As mentioned by sgarizvi, CUDA supports only four, non-customizable address modes, namely, clamp, border, wrap and mirror, which are described in Section 3.2.11.1. of the CUDA programming guide.
The former two work in both unnormalized and normalized coordinates, while the latter two in normalized coordinates only.
To describe the first two, let us consider the unnormalized coordinates case and consider 1D signals, for the sake of simplicity. In this case, the input sequence is c[k], with k=0,...,M-1.
cudaAddressModeClamp
The signal c[k] is continued outside k=0,...,M-1 so that c[k] = c[0] for k < 0, and c[k] = c[M-1] for k >= M.
cudaAddressModeBorder
The signal c[k] is continued outside k=0,...,M-1 so that c[k] = 0 for k < 0 and for k >= M.
Now, to describe the last two address modes, we are forced to consider normalized coordinates, so that the 1D input signal samples are assumed to be c[k / M], with k=0,...,M-1.
cudaAddressModeWrap
The signal c[k / M] is continued outside k=0,...,M-1 so that it is periodic with period equal to M. In other words, c[(k + p * M) / M] = c[k / M] for any (positive, negative or vanishing) integer p.
cudaAddressModeMirror
The signal c[k / M] is continued outside k=0,...,M-1 so that it is periodic with period equal to 2 * M - 2. In other words, c[l / M] = c[k / M] for any l and k such that (l + k) mod (2 * M - 2) = 0.
The following code illustrates all four available address modes:
#include <stdio.h>
#include <stdlib.h>

texture<float, 1, cudaReadModeElementType> texture_clamp;
texture<float, 1, cudaReadModeElementType> texture_border;
texture<float, 1, cudaReadModeElementType> texture_wrap;
texture<float, 1, cudaReadModeElementType> texture_mirror;

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/******************************/
/* CUDA ADDRESS MODE CLAMPING */
/******************************/
__global__ void Test_texture_clamping(const int M) {
    printf("Texture clamping - i = %i; value = %f\n", -threadIdx.x, tex1D(texture_clamp, -(float)threadIdx.x));
    printf("Texture clamping - i = %i; value = %f\n", M + threadIdx.x, tex1D(texture_clamp, (float)(M + threadIdx.x)));
}

/****************************/
/* CUDA ADDRESS MODE BORDER */
/****************************/
__global__ void Test_texture_border(const int M) {
    printf("Texture border - i = %i; value = %f\n", -threadIdx.x, tex1D(texture_border, -(float)threadIdx.x));
    printf("Texture border - i = %i; value = %f\n", M + threadIdx.x, tex1D(texture_border, (float)(M + threadIdx.x)));
}

/**************************/
/* CUDA ADDRESS MODE WRAP */
/**************************/
__global__ void Test_texture_wrap(const int M) {
    printf("Texture wrap - i = %i; value = %f\n", -threadIdx.x, tex1D(texture_wrap, -(float)threadIdx.x/(float)M));
    printf("Texture wrap - i = %i; value = %f\n", M + threadIdx.x, tex1D(texture_wrap, (float)(M + threadIdx.x)/(float)M));
}

/****************************/
/* CUDA ADDRESS MODE MIRROR */
/****************************/
__global__ void Test_texture_mirror(const int M) {
    printf("Texture mirror - i = %i; value = %f\n", -threadIdx.x, tex1D(texture_mirror, -(float)threadIdx.x/(float)M));
    printf("Texture mirror - i = %i; value = %f\n", M + threadIdx.x, tex1D(texture_mirror, (float)(M + threadIdx.x)/(float)M));
}

/********/
/* MAIN */
/********/
int main() {
    const int M = 4;
    // --- Host side memory allocation and initialization
    float *h_data = (float*)malloc(M * sizeof(float));
    for (int i=0; i<M; i++) h_data[i] = (float)i;
    // --- Texture clamping
    cudaArray* d_data_clamping = NULL; gpuErrchk(cudaMallocArray(&d_data_clamping, &texture_clamp.channelDesc, M, 1));
    gpuErrchk(cudaMemcpyToArray(d_data_clamping, 0, 0, h_data, M * sizeof(float), cudaMemcpyHostToDevice));
    cudaBindTextureToArray(texture_clamp, d_data_clamping);
    texture_clamp.normalized = false;
    texture_clamp.addressMode[0] = cudaAddressModeClamp;
    dim3 dimBlock(2 * M, 1); dim3 dimGrid(1, 1);
    Test_texture_clamping<<<dimGrid,dimBlock>>>(M);
    printf("\n\n\n");
    // --- Texture border
    cudaArray* d_data_border = NULL; gpuErrchk(cudaMallocArray(&d_data_border, &texture_border.channelDesc, M, 1));
    gpuErrchk(cudaMemcpyToArray(d_data_border, 0, 0, h_data, M * sizeof(float), cudaMemcpyHostToDevice));
    cudaBindTextureToArray(texture_border, d_data_border);
    texture_border.normalized = false;
    texture_border.addressMode[0] = cudaAddressModeBorder;
    Test_texture_border<<<dimGrid,dimBlock>>>(M);
    printf("\n\n\n");
    // --- Texture wrap
    cudaArray* d_data_wrap = NULL; gpuErrchk(cudaMallocArray(&d_data_wrap, &texture_wrap.channelDesc, M, 1));
    gpuErrchk(cudaMemcpyToArray(d_data_wrap, 0, 0, h_data, M * sizeof(float), cudaMemcpyHostToDevice));
    cudaBindTextureToArray(texture_wrap, d_data_wrap);
    texture_wrap.normalized = true;
    texture_wrap.addressMode[0] = cudaAddressModeWrap;
    Test_texture_wrap<<<dimGrid,dimBlock>>>(M);
    printf("\n\n\n");
    // --- Texture mirror
    cudaArray* d_data_mirror = NULL; gpuErrchk(cudaMallocArray(&d_data_mirror, &texture_mirror.channelDesc, M, 1));
    gpuErrchk(cudaMemcpyToArray(d_data_mirror, 0, 0, h_data, M * sizeof(float), cudaMemcpyHostToDevice));
    cudaBindTextureToArray(texture_mirror, d_data_mirror);
    texture_mirror.normalized = true;
    texture_mirror.addressMode[0] = cudaAddressModeMirror;
    Test_texture_mirror<<<dimGrid,dimBlock>>>(M);
    printf("\n\n\n");
    // --- Flush any remaining device-side printf output before exit
    gpuErrchk(cudaDeviceSynchronize());
    return 0;
}
These are the outputs:
index   -7 -6 -5 -4 -3 -2 -1  0  1  2  3  4  5  6  7  8  9 10 11
clamp    0  0  0  0  0  0  0  0  1  2  3  3  3  3  3  3  3  3  3
border   0  0  0  0  0  0  0  0  1  2  3  0  0  0  0  0  0  0  0
wrap     1  2  3  0  1  2  3  0  1  2  3  0  1  2  3  0  1  2  3
mirror   1  2  3  3  2  1  0  0  1  2  3  3  2  1  0  0  1  2  3
As of now (CUDA 5.5), the CUDA texture fetch behavior is not customizable. Only one of the four automatic built-in modes (i.e. Border, Clamp, Wrap and Mirror) can be utilized for out-of-range texture fetches.
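If you need a non-zero border value, one option is the manual coordinate check the question already mentions. The following is a minimal sketch of that idea (my illustration, not a CUDA API; it reuses texture_border from the example above and assumes unnormalized coordinates):
__device__ float tex1D_custom_border(const float x, const int M, const float border_value) {
    // Outside [0, M): return the user-chosen border value instead of the hardware's fixed 0.
    if (x < 0.f || x >= (float)M) return border_value;
    // Inside the texture: perform a normal fetch.
    return tex1D(texture_border, x);
}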