I need to make my kernel communicate with the host. I tried to use a global counter (suggestions for better approaches are welcome), but the following code always prints 0. What am I doing wrong? (I tried both the commented and uncommented versions.)
#include <stdio.h>
#include <cuda_runtime.h>
//__device__ int count[1] = {0};
__device__ int count = 0;
__global__ void inc() {
    //count[0]++;
    atomicAdd(&count, 1);
}

int main(void) {
    inc<<<1,10>>>();
    cudaDeviceSynchronize();
    //int *c;
    int c;
    cudaMemcpyFromSymbol(&c, count, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", c);
    return 0;
}
Any time you are having trouble with a CUDA code, I strongly encourage you to use proper CUDA error checking and run your code with cuda-memcheck before asking others for help. Even if you don't understand the error output, providing it in your question will be useful for those trying to help you.
If you had done so, you would have received a report that cudaMemcpyFromSymbol is throwing an invalid argument error.
If you study the documentation for that function call, you will see that the 4th parameter is not the direction parameter but the offset parameter, so passing cudaMemcpyDeviceToHost for the offset is incorrect. Since cudaMemcpyFromSymbol is always a device-to-host transfer, the direction argument is redundant, and since it has a default value it can simply be omitted. Your code works correctly for me with that argument removed:
$ cat t1414.cu
#include <stdio.h>
#include <cuda_runtime.h>
//__device__ int count[1] = {0};
__device__ int count = 0;
__global__ void inc() {
    //count[0]++;
    atomicAdd(&count, 1);
}

int main(void) {
    inc<<<1,10>>>();
    cudaDeviceSynchronize();
    //int *c;
    int c;
    cudaMemcpyFromSymbol(&c, count, sizeof(int));
    printf("%d\n", c);
    return 0;
}
$ nvcc -o t1414 t1414.cu
$ cuda-memcheck ./t1414
========= CUDA-MEMCHECK
10
========= ERROR SUMMARY: 0 errors
$
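For reference, the "proper CUDA error checking" mentioned above usually amounts to wrapping every runtime API call in a small checking macro. A minimal sketch (the macro name checkCuda is my own; it is not part of the CUDA toolkit):
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// abort with a readable message if a runtime API call fails
#define checkCuda(call) do { \
    cudaError_t err = (call); \
    if (err != cudaSuccess) { \
        fprintf(stderr, "CUDA error at %s:%d: %s\n", \
                __FILE__, __LINE__, cudaGetErrorString(err)); \
        exit(EXIT_FAILURE); \
    } \
} while (0)

// usage, e.g.:
// checkCuda(cudaMemcpyFromSymbol(&c, count, sizeof(int)));
With something like this in place, the invalid argument error in the original code would have been reported at the offending line instead of passing silently.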
I'm working through the examples in the "CUDA by Example" book. The following code doesn't work as it should and gives me no answer. Where's the mistake? I would appreciate your help.
I get output which reads:
Calculation done on GPU yields the answer: &d
Press enter to stop
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
__global__ void add_integers_cuda(int a, int b, int *c)
{
    *c = a + b;
}

int main(void)
{
    int c;
    int *dev_ptr;
    cudaMalloc((void **)&dev_ptr, sizeof(int)); //allocate sizeof(int) bytes of contiguous memory in the gpu device and return the address of first byte to dev_ptr.
    // call the kernel
    add_integers_cuda<<<1,1>>>(2,7,dev_ptr);
    cudaMemcpy(&c, dev_ptr, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Calculation done on GPU yields the answer: &d\n", c);
    cudaFree(dev_ptr);
    printf("Press enter to stop.");
    cin.ignore(255, '\n');
    return 0;
}
"
&d is not a correct printf formatting character here:
printf("Calculation done on GPU yields the answer: &d\n",c );
You won't get the output you are expecting.
You should use %d instead:
printf("Calculation done on GPU yields the answer: %d\n",c );
This particular issue has nothing to do with CUDA of course.
You may also want to run CUDA codes with cuda-memcheck and/or use proper CUDA error checking if you are just learning and having trouble. Neither of those would have pointed out the above error, however.
I am new to CUDA. I wrote a kernel (GPUsetIdentity) to create an identity matrix of dimension size x size. Inside a function GPUfunctioncall, I call my kernel. The identity matrix should be stored in dDataInv, but when I copy it back to dataOut (size x size), all the values are zero. I know I am doing something very stupid somewhere but couldn't spot it; if anyone can point out my mistake, thanks.
#include <stdio.h>
#include <malloc.h>
#include <memory.h>
#include <math.h>
#include <stdlib.h>
#include <iostream>
#include <stdlib.h>
#include <string>
#include <fstream>
#include <iterator>
#include <sstream>
#include <vector>
#include <cstring>
#include <cstdlib>
#include <ctime>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "cuda.h"
#define BLOCKSIZE 16
using namespace std;
__global__ void GPUsetIdentity (float* matrix, int width)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int offset = bx * BLOCKSIZE + tx;
    matrix[offset + width * offset] = 1;
}

void print_matrix_host(float* A, int nr_rows_A, int nr_cols_A) {
    for (int i = 0; i < nr_rows_A; ++i) {
        for (int j = 0; j < nr_cols_A; ++j) {
            std::cout << A[i * nr_rows_A + j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}

int GPUfunctioncall (float* hDataOut, int size) {
    float *dDataInv;
    cudaMalloc ((void **) &dDataInv, size);
    cudaMemset ((void *) dDataInv, 0, size);
    dim3 idyThreads (BLOCKSIZE);
    dim3 idyBlocks (size / BLOCKSIZE);
    GPUsetIdentity <<< idyBlocks, idyThreads >>> (dDataInv, size);
    cudaThreadSynchronize ();
    cudaMemcpy ((void *) hDataOut, (void *) dDataInv, size, cudaMemcpyDeviceToHost);
    cudaFree (dDataInv);
    return 0;
}

int main()
{
    int size = 4;
    float* dataOut;
    dataOut = new float[size*size];
    GPUfunctioncall(dataOut, size);
    print_matrix_host(dataOut, size, size);
}
Any time you are having trouble with a CUDA code, it's good practice to use proper CUDA error checking. You can also run your code with cuda-memcheck to get a quick read on whether there are any errors.
Using either of these methods, you would have discovered an "invalid configuration error" on your kernel launch. This usually means that the parameters in the <<< >>> syntax are incorrect. When you run into this type of error, simply printing out those values may indicate the problem.
In your case, this line of code:
dim3 idyBlocks (size / BLOCKSIZE);
results in a value of 0 for idyBlocks when size is 4 and BLOCKSIZE is 16. So you are requesting a kernel launch of 0 blocks, which is illegal. Therefore your kernel is not running and your results are not what you expect.
There are a variety of ways to solve this, many of them involving detecting this condition and adding an "extra block" when size is not evenly divisible by BLOCKSIZE. Using this approach, we may be launching "extra threads", so we must include a "thread check" in the kernel to prevent those extra threads from doing anything (such as accessing arrays out of bounds). For this, we often need to know the intended size in the kernel, and we can pass this value as an extra kernel parameter.
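One common idiom for the block count (a sketch; equivalent to the approach used in the code below) is the round-up division:
int num_blocks = (size + BLOCKSIZE - 1) / BLOCKSIZE;  // rounds up: 1 block when size is 4, BLOCKSIZE is 16
and, inside the kernel, a thread check along these lines:
if (offset < size)
    matrix[offset + width * offset] = 1;
so that any extra threads beyond size do no work.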
You've also made some errors in your handling of device variables. The following code:
dataOut = new float[size*size];
allocates enough space for a square matrix of dimension size. But the following code:
cudaMalloc ((void **) &dDataInv, size);
only allocates enough space for size bytes. You want size*size*sizeof(float) instead of size here, and likewise in the following cudaMemset and cudaMemcpy operations: cudaMalloc, cudaMemset, and cudaMemcpy take a size parameter in bytes, just like malloc, memset, and memcpy.
The following code has those modifications, and seems to work correctly for me:
$ cat t580.cu
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define BLOCKSIZE 16
using namespace std;
__global__ void GPUsetIdentity (float* matrix, int width, int size)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int offset = bx * BLOCKSIZE + tx;
    if (offset < size)
        matrix[offset + width * offset] = 1;
}

void print_matrix_host(float* A, int nr_rows_A, int nr_cols_A) {
    for (int i = 0; i < nr_rows_A; ++i) {
        for (int j = 0; j < nr_cols_A; ++j) {
            std::cout << A[i * nr_rows_A + j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}

int GPUfunctioncall (float* hDataOut, int size) {
    float *dDataInv;
    cudaMalloc ((void **) &dDataInv, size*size*sizeof(float));
    cudaMemset ((void *) dDataInv, 0, size*size*sizeof(float));
    dim3 idyThreads (BLOCKSIZE);
    int num_blocks = size/BLOCKSIZE + ((size%BLOCKSIZE) ? 1 : 0);  // add an extra block for the remainder
    dim3 idyBlocks (num_blocks);
    GPUsetIdentity <<< idyBlocks, idyThreads >>> (dDataInv, size, size);
    cudaThreadSynchronize ();
    cudaMemcpy ((void *) hDataOut, (void *) dDataInv, size*size*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree (dDataInv);
    return 0;
}

int main()
{
    int size = 4;
    float* dataOut;
    dataOut = new float[size*size];
    GPUfunctioncall(dataOut, size);
    print_matrix_host(dataOut, size, size);
}
$ nvcc -arch=sm_20 -o t580 t580.cu
$ cuda-memcheck ./t580
========= CUDA-MEMCHECK
1 0 0 0
0 1 0 0
0 0 1 0
0 0 0 1
========= ERROR SUMMARY: 0 errors
$
Note that it may be redundant to pass size twice to the kernel. For this particular example, we could have easily used the width parameter to do our kernel "thread check". But for educational purposes, I chose to call it out as a separate parameter, because in the general case you will often pass it as a separate parameter to other kernels that you write.
Finally, note that cudaThreadSynchronize() is deprecated and should be replaced with cudaDeviceSynchronize(). In this particular example, neither is actually necessary, as the next cudaMemcpy operation will force the same kind of synchronization, but you may use it if you decide to add CUDA error checking to your code (recommended).
I have a problem! I need to initialize a constant global array in CUDA C, and to initialize the array I need to use a for loop. I need to do this because I have to use this array in some kernels, and my professor told me to define it as a constant visible only on the device.
How can I do this?
I want to do something like this:
#include <stdio.h>
#include <math.h>
#define N 8
__constant__ double H[N*N];
__global__ void prodotto(double *v, double *w){
    int k = threadIdx.x + blockDim.x*blockIdx.x;
    w[k] = 0;
    for (int i = 0; i < N; i++) w[k] = w[k] + H[k*N+i]*v[i];
}

int main(){
    double v[8] = {1, 1, 1, 1, 1, 1, 1, 1};
    double *dev_v, *dev_w, *w;
    double *host_H;
    host_H = (double*)malloc((N*N)*sizeof(double));
    cudaMalloc((void**)&dev_v, sizeof(double));
    cudaMalloc((void**)&dev_w, sizeof(double));
    for (int k = 0; k < N; k++){
        host_H[2*N*k+2*k] = 1/1.414;
        host_H[2*N*k+2*k+1] = 1/1.414;
        host_H[(2*k+1)*N+2*k] = 1/1.414;
        host_H[(2*k+1)+2*k+1] = -1/1.414;
    }
    cudaMemcpyToSymbol(H, host_H, (N*N)*sizeof(double));
    cudaMemcpy(dev_v, v, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_w, w, N*sizeof(double), cudaMemcpyHostToDevice);
    prodotto<<<1,N>>>(dev_v, dev_w);
    cudaMemcpy(v, dev_v, N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(w, dev_w, N*sizeof(double), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) printf("\n%f %f", v[i], w[i]);
    return 0;
}
But the output is an array of zeros... I want the output array to be filled with the product of the matrix H (here treated as a flat array) and the array v.
Thanks!
Something like this should work:
#define DSIZE 32
__constant__ int mydata[DSIZE];
int main(){
    ...
    int *h_mydata;
    h_mydata = new int[DSIZE];
    for (int i = 0; i < DSIZE; i++)
        h_mydata[i] = ....; // initialize however you wish
    cudaMemcpyToSymbol(mydata, h_mydata, DSIZE*sizeof(int));
    ...
}
Not difficult. You can then use the __constant__ data directly in a kernel:
__global__ void mykernel(...){
    ...
    int myval = mydata[threadIdx.x];
    ...
}
You can read about __constant__ variables in the programming guide. __constant__ variables are read-only from the perspective of device code (kernel code). But from the host, they can be read from or written to using the cudaMemcpyToSymbol/cudaMemcpyFromSymbol API.
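For example, to read the data back on the host later (a minimal sketch reusing the mydata symbol from above):
int h_check[DSIZE];
cudaMemcpyFromSymbol(h_check, mydata, DSIZE*sizeof(int)); // device -> host copy of the symbol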
EDIT: Based on the code you've now posted, there were at least 2 errors:
Your allocation sizes for dev_v and dev_w were not correct.
You had no host allocation for w.
The following code seems to work correctly for me with those 2 fixes:
$ cat t579.cu
#include <stdio.h>
#include <math.h>
#define N 8
__constant__ double H[N*N];
__global__ void prodotto(double *v, double *w){
    int k = threadIdx.x + blockDim.x*blockIdx.x;
    w[k] = 0;
    for (int i = 0; i < N; i++) w[k] = w[k] + H[k*N+i]*v[i];
}

int main(){
    double v[N] = {1, 1, 1, 1, 1, 1, 1, 1};
    double *dev_v, *dev_w, *w;
    double *host_H;
    host_H = (double*)malloc((N*N)*sizeof(double));
    w      = (double*)malloc(  (N)*sizeof(double));
    cudaMalloc((void**)&dev_v, N*sizeof(double));
    cudaMalloc((void**)&dev_w, N*sizeof(double));
    for (int k = 0; k < N; k++){
        host_H[2*N*k+2*k] = 1/1.414;
        host_H[2*N*k+2*k+1] = 1/1.414;
        host_H[(2*k+1)*N+2*k] = 1/1.414;
        host_H[(2*k+1)+2*k+1] = -1/1.414;
    }
    cudaMemcpyToSymbol(H, host_H, (N*N)*sizeof(double));
    cudaMemcpy(dev_v, v, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_w, w, N*sizeof(double), cudaMemcpyHostToDevice);
    prodotto<<<1,N>>>(dev_v, dev_w);
    cudaMemcpy(v, dev_v, N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(w, dev_w, N*sizeof(double), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) printf("\n%f %f", v[i], w[i]);
    printf("\n");
    return 0;
}
$ nvcc -arch=sm_20 -o t579 t579.cu
$ cuda-memcheck ./t579
========= CUDA-MEMCHECK
1.000000 0.000000
1.000000 -0.707214
1.000000 -0.707214
1.000000 -1.414427
1.000000 1.414427
1.000000 0.707214
1.000000 1.414427
1.000000 0.707214
========= ERROR SUMMARY: 0 errors
$
A few notes:
Any time you're having trouble with a CUDA code, it's good practice to use proper CUDA error checking.
You can run your code with cuda-memcheck (just as I have above) to get a quick read on whether any CUDA errors are encountered.
I've not verified the numerical results or worked through the math. If it's not what you wanted, I assume you can sort it out.
I've not made any changes to your code other than what seemed sensible to me to fix the obvious errors and make the results presentable for educational purposes. Certainly there can be discussions about preferred allocation methods, printf vs. cout, and what have you. I'm focused primarily on CUDA topics in this answer.
I am trying to apply a kernel function to a __device__ variable, which, according to the specs, resides "in global memory":
#include <stdio.h>
#include "sys_data.h"
#include "my_helper.cuh"
#include "helper_cuda.h"
#include <cuda_runtime.h>
double X[10] = {1,-2,3,-4,5,-6,7,-8,9,-10};
double Y[10] = {0};
__device__ double DEV_X[10];
int main(void) {
    checkCudaErrors(cudaMemcpyToSymbol(DEV_X, X, 10*sizeof(double)));
    vector_projection<double><<<1,10>>>(DEV_X, 10);
    getLastCudaError("oops");
    checkCudaErrors(cudaMemcpyFromSymbol(Y, DEV_X, 10*sizeof(double)));
    return 0;
}
The kernel function vector_projection is defined in my_helper.cuh as follows:
template<typename T> __global__ void vector_projection(T *dx, int n) {
    int tid;
    tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < n) {
        if (dx[tid] < 0)
            dx[tid] = (T) 0;
    }
}
As you can see, I use cudaMemcpyToSymbol and cudaMemcpyFromSymbol to transfer data to and from the device. However, I'm getting the following error:
CUDA error at ../src/vectorAdd.cu:19 code=4(cudaErrorLaunchFailure)
"cudaMemcpyFromSymbol(Y, DEV_X, 10*sizeof(double))"
Footnote: I can of course avoid using __device__ variables and go for something like this, which works fine; I just want to see how to do the same thing (if possible) with __device__ variables.
Update: The output of cuda-memcheck can be found at http://pastebin.com/AW9vmjFs. The error messages I get are as follows:
========= Invalid __global__ read of size 8
========= at 0x000000c8 in /home/ubuntu/Test0001/Debug/../src/my_helper.cuh:75:void vector_projection<double>(double*, int)
========= by thread (9,0,0) in block (0,0,0)
========= Address 0x000370e8 is out of bounds
The root of the problem is that you are not allowed to take the address of a device variable in ordinary host code:
vector_projection<double><<<1,10>>>(DEV_X, 10);
                                    ^
Although this seems to compile correctly, the actual address passed is garbage.
To take the address of a device variable in host code, we can use cudaGetSymbolAddress.
Here is a worked example that compiles and runs correctly for me:
$ cat t577.cu
#include <stdio.h>
double X[10] = {1,-2,3,-4,5,-6,7,-8,9,-10};
double Y[10] = {0};
__device__ double DEV_X[10];
template<typename T> __global__ void vector_projection(T *dx, int n) {
    int tid;
    tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < n) {
        if (dx[tid] < 0)
            dx[tid] = (T) 0;
    }
}

int main(void) {
    cudaMemcpyToSymbol(DEV_X, X, 10*sizeof(double));
    double *my_dx;
    cudaGetSymbolAddress((void **)&my_dx, DEV_X);
    vector_projection<double><<<1,10>>>(my_dx, 10);
    cudaMemcpyFromSymbol(Y, DEV_X, 10*sizeof(double));
    for (int i = 0; i < 10; i++)
        printf("%d: %f\n", i, Y[i]);
    return 0;
}
$ nvcc -arch=sm_35 -o t577 t577.cu
$ cuda-memcheck ./t577
========= CUDA-MEMCHECK
0: 1.000000
1: 0.000000
2: 3.000000
3: 0.000000
4: 5.000000
5: 0.000000
6: 7.000000
7: 0.000000
8: 9.000000
9: 0.000000
========= ERROR SUMMARY: 0 errors
$
This is not the only way to address this. It is legal to take the address of a device variable in device code, so you could modify your kernel with a line something like this:
T *dx = DEV_X;
and forgo passing the device variable as a kernel parameter. As suggested in the comments, you could also modify your code to use Unified Memory.
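For completeness, a minimal sketch of the Unified Memory variant (my own adaptation, assuming a platform that supports managed memory; it reuses the vector_projection kernel from above):
#include <stdio.h>
__managed__ double X[10] = {1,-2,3,-4,5,-6,7,-8,9,-10};

int main(void) {
    vector_projection<double><<<1,10>>>(X, 10); // a __managed__ address is valid in both host and device code
    cudaDeviceSynchronize();                    // synchronize before the host touches managed data
    for (int i = 0; i < 10; i++)
        printf("%d: %f\n", i, X[i]);
    return 0;
}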
Regarding error checking, if you deviate from proper CUDA error checking and are not careful in your deviations, the results may be confusing. Most CUDA API calls can, in addition to errors arising from their own behavior, return an error that resulted from some previous CUDA asynchronous activity (usually kernel calls).
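A common pattern that separates the two kinds of errors (a sketch; mykernel, grid, block, and args are placeholders):
mykernel<<<grid, block>>>(args);
cudaError_t launch_err = cudaGetLastError();      // errors from the launch itself, e.g. invalid configuration
cudaError_t async_err  = cudaDeviceSynchronize(); // errors during kernel execution, e.g. illegal memory access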
In my older CUDA project I had the globals:
__device__ uint8_t dev_intersect
__constant__ uint8_t dev_flags
... and used them this way:
cudaGetSymbolAddress((void**)&ptr_dev_intersect,"dev_intersect")
cudaMemcpyToSymbol("dev_flags",&flags,sizeof(flags))
Now, since CUDA 5.0 (and newer) the symbols must be passed directly (without string), so I define the globals this way:
__device__ uint8_t *dev_intersect
__constant__ uint8_t *dev_flags
...and call the functions this way:
cudaGetSymbolAddress((void**)&ptr_dev_intersect,dev_intersect)
cudaMemcpyToSymbol(dev_flags,&flags,sizeof(flags))
Am I doing it right so far? I'm asking because when I updated the code, I started getting other errors, which makes me suspicious. Thanks for any help.
Switching from a POD variable to a pointer is probably not what you want.
If you didn't make changes elsewhere in your code to account for that difference, I would expect things to break.
To update your CUDA function calls, leave your variables as-is:
__device__ uint8_t dev_intersect;
__constant__ uint8_t dev_flags;
And just drop the quotes from your cuda API functions that use those variables:
cudaGetSymbolAddress((void**)&ptr_dev_intersect,dev_intersect);
cudaMemcpyToSymbol(dev_flags,&flags,sizeof(flags));
Here is a complete worked example:
$ cat t524.cu
#include <stdio.h>
typedef unsigned char uint8_t;
__device__ uint8_t dev_intersect;
__constant__ uint8_t dev_flags;
__global__ void mykernel(uint8_t *d1_ptr){
    printf("data 1 = %c\n", *d1_ptr);
    printf("dev_flags = %c\n", dev_flags);
}

int main(){
    uint8_t *ptr_dev_intersect;
    uint8_t flags = 'X';
    uint8_t dev_intersect_data = 'Y';
    cudaGetSymbolAddress((void**)&ptr_dev_intersect, dev_intersect);
    cudaMemcpyToSymbol(dev_flags, &flags, sizeof(flags));
    cudaMemcpyToSymbol(dev_intersect, &dev_intersect_data, sizeof(dev_intersect_data));
    mykernel<<<1,1>>>(ptr_dev_intersect);
    cudaDeviceSynchronize();
    return 0;
}
$ nvcc -arch=sm_20 -o t524 t524.cu
$ cuda-memcheck ./t524
========= CUDA-MEMCHECK
data 1 = Y
dev_flags = X
========= ERROR SUMMARY: 0 errors
$