I am trying to apply a kernel function to a __device__ variable, which, according to the specs, resides "in global memory":
#include <stdio.h>
#include "sys_data.h"
#include "my_helper.cuh"
#include "helper_cuda.h"
#include <cuda_runtime.h>
double X[10] = {1,-2,3,-4,5,-6,7,-8,9,-10};
double Y[10] = {0};
__device__ double DEV_X[10];
int main(void) {
checkCudaErrors(cudaMemcpyToSymbol(DEV_X, X,10*sizeof(double)));
vector_projection<double><<<1,10>>>(DEV_X, 10);
getLastCudaError("oops");
checkCudaErrors(cudaMemcpyFromSymbol(Y, DEV_X, 10*sizeof(double)));
return 0;
}
The kernel function vector_projection is defined in my_helper.cuh as follows:
template<typename T> __global__ void vector_projection(T *dx, int n) {
int tid;
tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) {
if (dx[tid] < 0)
dx[tid] = (T) 0;
}
}
As you can see, I use cudaMemcpyToSymbol and cudaMemcpyFromSymbol to transfer data to and from the device. However, I'm getting the following error:
CUDA error at ../src/vectorAdd.cu:19 code=4(cudaErrorLaunchFailure)
"cudaMemcpyFromSymbol(Y, DEV_X, 10*sizeof(double))"
Footnote: I can of course avoid using __device__ variables and go for something like this, which works fine (a sketch of that alternative is below); I just want to see how to do the same thing (if possible) with __device__ variables.
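Roughly, the working alternative uses an ordinary device pointer instead of the symbol (a sketch, assuming the same X, Y, and kernel as above):
double *dev_x;
checkCudaErrors(cudaMalloc((void **)&dev_x, 10*sizeof(double)));
checkCudaErrors(cudaMemcpy(dev_x, X, 10*sizeof(double), cudaMemcpyHostToDevice));
vector_projection<double><<<1,10>>>(dev_x, 10);
getLastCudaError("oops");
checkCudaErrors(cudaMemcpy(Y, dev_x, 10*sizeof(double), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(dev_x));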
Update: The output of cuda-memcheck can be found at http://pastebin.com/AW9vmjFs. The error messages I get are as follows:
========= Invalid __global__ read of size 8
========= at 0x000000c8 in /home/ubuntu/Test0001/Debug/../src/my_helper.cuh:75:void vector_projection<double>(double*, int)
========= by thread (9,0,0) in block (0,0,0)
========= Address 0x000370e8 is out of bounds
The root of the problem is that you are not allowed to take the address of a device variable in ordinary host code:
vector_projection<double><<<1,10>>>(DEV_X, 10);
                                    ^
Although this seems to compile correctly, the actual address passed is garbage.
To take the address of a device variable in host code, we can use cudaGetSymbolAddress.
Here is a worked example that compiles and runs correctly for me:
$ cat t577.cu
#include <stdio.h>
double X[10] = {1,-2,3,-4,5,-6,7,-8,9,-10};
double Y[10] = {0};
__device__ double DEV_X[10];
template<typename T> __global__ void vector_projection(T *dx, int n) {
int tid;
tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) {
if (dx[tid] < 0)
dx[tid] = (T) 0;
}
}
int main(void) {
cudaMemcpyToSymbol(DEV_X, X,10*sizeof(double));
double *my_dx;
cudaGetSymbolAddress((void **)&my_dx, DEV_X);
vector_projection<double><<<1,10>>>(my_dx, 10);
cudaMemcpyFromSymbol(Y, DEV_X, 10*sizeof(double));
for (int i = 0; i < 10; i++)
printf("%d: %f\n", i, Y[i]);
return 0;
}
$ nvcc -arch=sm_35 -o t577 t577.cu
$ cuda-memcheck ./t577
========= CUDA-MEMCHECK
0: 1.000000
1: 0.000000
2: 3.000000
3: 0.000000
4: 5.000000
5: 0.000000
6: 7.000000
7: 0.000000
8: 9.000000
9: 0.000000
========= ERROR SUMMARY: 0 errors
$
This is not the only way to address this. It is legal to take the address of a device variable in device code, so you could modify your kernel with a line something like this:
T *dx = DEV_X;
and forgo passing the device variable as a kernel parameter. As suggested in the comments, you could also modify your code to use Unified Memory; a sketch of that follows.
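For completeness, a minimal managed-memory sketch (assumes a GPU and CUDA version that support Unified Memory; um_x is an illustrative name, and error checking is omitted for brevity):
double *um_x;
cudaMallocManaged((void **)&um_x, 10*sizeof(double));  // accessible from host and device
for (int i = 0; i < 10; i++) um_x[i] = X[i];            // fill directly from host code
vector_projection<double><<<1,10>>>(um_x, 10);
cudaDeviceSynchronize();                                 // make results visible to the host
for (int i = 0; i < 10; i++) Y[i] = um_x[i];
cudaFree(um_x);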
Regarding error checking, if you deviate from proper cuda error checking and are not careful in your deviations, the results may be confusing. Most cuda API calls can, in addition to errors arising from their own behavior, return an error that resulted from some previous CUDA asynchronous activity (usually kernel calls).
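If you don't have the helper_cuda.h macros handy, a bare-bones check along these lines (a sketch, not the helper_cuda.h implementation; requires <stdio.h> and <stdlib.h>) makes both kinds of error visible:
#define cudaCheck(call) do { \
    cudaError_t err = (call); \
    if (err != cudaSuccess) { \
        fprintf(stderr, "CUDA error: %s at %s:%d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
        exit(1); \
    } \
} while (0)
// wrap every runtime API call, and check kernel launches explicitly:
cudaCheck(cudaMemcpyToSymbol(DEV_X, X, 10*sizeof(double)));
vector_projection<double><<<1,10>>>(my_dx, 10);
cudaCheck(cudaGetLastError());       // catches launch/configuration errors
cudaCheck(cudaDeviceSynchronize());  // surfaces asynchronous execution errors from the kernel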
Related
Given a 64-bit variable in a register in a kernel, let's say std::uint64_t var, I want to do some calculations and set each bit of this variable using 64 different threads separately but in parallel. Is it possible to write to a shared variable in parallel?
__shared__ std::uint64_t var = 0
in each thread (tid = 0 to 63):
    do some calculations
    if we should set the bit with index = tid then:
        var |= ((std::uint64_t) 1 << tid)
I also realized that using atomicOr does not benefit us as it only works on integers.
You can do this using a 64-bit shared atomicOr():
$ cat t2082.cu
#include <cstdio>
#include <cstdint>
__global__ void k(){
__shared__ unsigned long long var;
if (!threadIdx.x) var = 0;
__syncthreads();
atomicOr(&var, 1ULL<<threadIdx.x);
__syncthreads();
if (!threadIdx.x) printf("0x%llx\n", var);
}
int main(){
k<<<1,64>>>();
cudaDeviceSynchronize();
}
$ nvcc -o t2082 t2082.cu
$ compute-sanitizer ./t2082
========= COMPUTE-SANITIZER
0xffffffffffffffff
========= ERROR SUMMARY: 0 errors
$
The 64-bit shared atomicOr is supported on devices of cc3.5 or greater, which is the same device footprint supported by CUDA 11.
If you were, for example, on CUDA 10.x and using a cc3.0 device, you could do this with two 32-bit variables:
$ cat t2082.cu
#include <cstdio>
#include <cstdint>
__global__ void k(){
__shared__ unsigned var[2];
if (!threadIdx.x) {var[0] = 0; var[1] = 0;}
__syncthreads();
if (threadIdx.x < 32)
atomicOr(&(var[0]), 1U<<threadIdx.x);
else
atomicOr(&(var[1]), 1U<<(threadIdx.x-32));
__syncthreads();
unsigned long long var64 = (((unsigned long long)var[1])<<32) + var[0];
if (!threadIdx.x) printf("0x%llx\n", var64);
}
int main(){
k<<<1,64>>>();
cudaDeviceSynchronize();
}
$ nvcc -o t2082 t2082.cu
$ compute-sanitizer ./t2082
========= COMPUTE-SANITIZER
0xffffffffffffffff
========= ERROR SUMMARY: 0 errors
$
I need to make my kernel communicate with the host. I tried to use a global counter (suggestions for better approaches are welcome), but the following code always prints 0. What am I doing wrong? (I tried both the commented and the uncommented ways.)
#include <stdio.h>
#include <cuda_runtime.h>
//__device__ int count[1] = {0};
__device__ int count = 0;
__global__ void inc() {
//count[0]++;
atomicAdd(&count, 1);
}
int main(void) {
inc<<<1,10>>>();
cudaDeviceSynchronize();
//int *c;
int c;
cudaMemcpyFromSymbol(&c, count, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n", c);
return 0;
}
Anytime you are having trouble with a CUDA code, I strongly encourage you to use proper CUDA error checking and run your code with cuda-memcheck, before asking others for help. Even if you don't understand the error output, providing it in your question will be useful for those trying to help you.
If you had done so, you would have received a report that cudaMemcpyFromSymbol is throwing an invalid argument error.
If you study the documentation for that function call, you will see that the 4th parameter is not the direction parameter but the offset parameter. So providing cudaMemcpyDeviceToHost for the offset parameter is incorrect. Since cudaMemcpyFromSymbol is always a device-to-host transfer, the direction argument is redundant, and since it has a default value, it can simply be omitted. Your code works correctly for me simply by eliminating it:
$ cat t1414.cu
#include <stdio.h>
#include <cuda_runtime.h>
//__device__ int count[1] = {0};
__device__ int count = 0;
__global__ void inc() {
//count[0]++;
atomicAdd(&count, 1);
}
int main(void) {
inc<<<1,10>>>();
cudaDeviceSynchronize();
//int *c;
int c;
cudaMemcpyFromSymbol(&c, count, sizeof(int));
printf("%d\n", c);
return 0;
}
$ nvcc -o t1414 t1414.cu
$ cuda-memcheck ./t1414
========= CUDA-MEMCHECK
10
========= ERROR SUMMARY: 0 errors
$
I'm new to OpenACC. I like it very much so far, as I'm familiar with OpenMP.
I have two 1080 Ti cards, each with 9 GB, and 128 GB of RAM. I'm trying a very basic test: allocate an array, initialize it, then sum it up in parallel. This works for 8 GB, but when I increase it to 10 GB I get an out-of-memory error. My understanding was that with the unified memory of Pascal (which these cards are) and CUDA 8, I could allocate an array larger than the GPU's memory and the hardware would page data in and out on demand.
Here's my full C test code:
$ cat firstAcc.c
#include <stdio.h>
#include <openacc.h>
#include <stdlib.h>
#define GB 10
int main()
{
float *a;
size_t n = GB*1024*1024*1024/sizeof(float);
size_t s = n * sizeof(float);
a = (float *)malloc(s);
if (!a) { printf("Failed to malloc.\n"); return 1; }
printf("Initializing ... ");
for (int i = 0; i < n; ++i) {
a[i] = 0.1f;
}
printf("done\n");
float sum=0.0;
#pragma acc loop reduction (+:sum)
for (int i = 0; i < n; ++i) {
sum+=a[i];
}
printf("Sum is %f\n", sum);
free(a);
return 0;
}
As per the "Enable Unified Memory" section of this article I compile it with :
$ pgcc -acc -fast -ta=tesla:managed:cuda8 -Minfo firstAcc.c
main:
20, Loop not fused: function call before adjacent loop
Generated vector simd code for the loop
28, Loop not fused: function call before adjacent loop
Generated vector simd code for the loop containing reductions
Generated a prefetch instruction for the loop
I still need to understand those messages, but for now I don't think they are relevant. Then I run it:
$ ./a.out
malloc: call to cuMemAllocManaged returned error 2: Out of memory
Aborted (core dumped)
This works fine if I change GB to 8. I expected 10 GB to work (despite the GPU card having 9 GB) thanks to the Pascal 1080 Ti and CUDA 8.
Have I misunderstood something, or what am I doing wrong? Thanks in advance.
$ pgcc -V
pgcc 17.4-0 64-bit target on x86-64 Linux -tp haswell
PGI Compilers and Tools
Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
$ cat /usr/local/cuda-8.0/version.txt
CUDA Version 8.0.61
Besides what Bob mentioned, I made a few more fixes.
First, you're not actually generating an OpenACC compute region since you only have a "#pragma acc loop" directive. This should be "#pragma acc parallel loop". You can see this in the compiler feedback messages where it's only showing host code optimizations.
Second, the "i" index should be declared as a "long". Otherwise, you'll overflow the index.
Finally, you need to add "cc60" to your target accelerator options to tell the compiler to target a Pascal based GPU.
% cat mi.c
#include <stdio.h>
#include <openacc.h>
#include <stdlib.h>
#define GB 20ULL
int main()
{
float *a;
size_t n = GB*1024ULL*1024ULL*1024ULL/sizeof(float);
size_t s = n * sizeof(float);
printf("n = %lu, s = %lu\n", n, s);
a = (float *)malloc(s);
if (!a) { printf("Failed to malloc.\n"); return 1; }
printf("Initializing ... ");
for (int i = 0; i < n; ++i) {
a[i] = 0.1f;
}
printf("done\n");
double sum=0.0;
#pragma acc parallel loop reduction (+:sum)
for (long i = 0; i < n; ++i) {
sum+=a[i];
}
printf("Sum is %f\n", sum);
free(a);
return 0;
}
% pgcc -fast -acc -ta=tesla:managed,cuda8.0,cc60 -Minfo=accel mi.c
main:
21, Accelerator kernel generated
Generating Tesla code
21, Generating reduction(+:sum)
22, #pragma acc loop gang, vector(128) /* blockIdx.x threadIdx.x */
21, Generating implicit copyin(a[:5368709120])
% ./a.out
n = 5368709120, s = 21474836480
Initializing ... done
Sum is 536870920.000000
I believe a problem is here:
size_t n = GB*1024*1024*1024/sizeof(float);
when I compile that line of code with g++, I get a warning about integer overflow. For some reason the PGI compiler does not warn, but the same badness is occurring under the hood. After the declarations of n and s, if I add a printout like this:
size_t n = GB*1024*1024*1024/sizeof(float);
size_t s = n * sizeof(float);
printf("n = %lu, s = %lu\n", n, s); // add this line
and compile with PGI 17.04, and run (on a P100, with 16GB) I get output like this:
$ pgcc -acc -fast -ta=tesla:managed:cuda8 -Minfo m1.c
main:
16, Loop not fused: function call before adjacent loop
Generated vector simd code for the loop
22, Loop not fused: function call before adjacent loop
Generated vector simd code for the loop containing reductions
Generated a prefetch instruction for the loop
$ ./a.out
n = 4611686017890516992, s = 18446744071562067968
malloc: call to cuMemAllocManaged returned error 2: Out of memory
Aborted
$
so it's evident that n and s are not what you intended.
We can fix this by marking all of those constants with ULL, and then things seem to work correctly for me:
$ cat m1.c
#include <stdio.h>
#include <openacc.h>
#include <stdlib.h>
#define GB 20ULL
int main()
{
float *a;
size_t n = GB*1024ULL*1024ULL*1024ULL/sizeof(float);
size_t s = n * sizeof(float);
printf("n = %lu, s = %lu\n", n, s);
a = (float *)malloc(s);
if (!a) { printf("Failed to malloc.\n"); return 1; }
printf("Initializing ... ");
for (int i = 0; i < n; ++i) {
a[i] = 0.1f;
}
printf("done\n");
double sum=0.0;
#pragma acc loop reduction (+:sum)
for (int i = 0; i < n; ++i) {
sum+=a[i];
}
printf("Sum is %f\n", sum);
free(a);
return 0;
}
$ pgcc -acc -fast -ta=tesla:managed:cuda8 -Minfo m1.c
main:
16, Loop not fused: function call before adjacent loop
Generated vector simd code for the loop
22, Loop not fused: function call before adjacent loop
Generated vector simd code for the loop containing reductions
Generated a prefetch instruction for the loop
$ ./a.out
n = 5368709120, s = 21474836480
Initializing ... done
Sum is 536870920.000000
$
Note that I've made another change above as well. I changed the sum accumulation variable from float to double. This is necessary to preserve somewhat "sensible" results when doing a very large reduction across very small quantities.
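To see that effect in isolation, here is a small host-only sketch (illustrative values only, not part of the OpenACC program above): a float accumulator stops advancing long before a double one does:
#include <stdio.h>
int main(){
    float  fsum = 0.0f;
    double dsum = 0.0;
    for (long i = 0; i < 100000000L; i++) {  // 1e8 additions of 0.1f
        fsum += 0.1f;   // stops changing once 0.1f falls below the float's rounding granularity
        dsum += 0.1f;
    }
    printf("float sum: %f, double sum: %f\n", fsum, dsum);  // float sum ends up far below 1e7
    return 0;
}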
And, as #MatColgrove pointed out in his answer, I missed a few other things as well.
It is strange that when I do not put cuda-memcheck before ./main, the program runs without any warning or error message; however, when I add it, I get error messages like the following.
========= Invalid __global__ write of size 8
========= at 0x00000120 in initCurand(curandStateXORWOW*, unsigned long)
========= by thread (9,0,0) in block (3,0,0)
========= Address 0x5005413b0 is out of bounds
========= Saved host backtrace up to driver entry point at kernel launch time
========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 (cuLaunchKernel + 0x2c5) [0x204115]
========= Host Frame:./main [0x18e11]
========= Host Frame:./main [0x369b3]
========= Host Frame:./main [0x3403]
========= Host Frame:./main [0x308c]
========= Host Frame:./main [0x30b7]
========= Host Frame:./main [0x2ebb]
========= Host Frame:/lib/x86_64-linux-gnu/libc.so.6 (__libc_start_main + 0xf0) [0x20830]
Here are my functions. A brief introduction to the code: I try to generate random numbers and save them to a device variable weights, then use this vector to sample from discrete numbers.
#include<iostream>
#include<curand.h>
#include<curand_kernel.h>
#include<time.h>
using namespace std;
#define num 100
__device__ float weights[num];
// function to define seed
__global__ void initCurand(curandState *state, unsigned long seed){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, idx, 0, &state[idx]);
}
__device__ void sampling(float *weight, float max_weight, int *index, curandState *state){
int j;
float u;
do{
j = (int)(curand_uniform(state) * (num + 0.999999));
u = curand_uniform(state); //sample from uniform distribution;
}while( u > weight[j]/max_weight);
*index = j;
}
__global__ void test(int *dev_sample, curandState *state){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// generate random numbers from uniform distribution and save them to weights
weights[idx] = curand_uniform(&state[idx]);
// run sampling function, in which, weights is an input for the function on each thread
sampling(weights, 1, dev_sample+idx, &state[idx]);
}
int main(){
// define the seed of random generator
curandState *devState;
cudaMalloc((void**)&devState, num*sizeof(curandState));
int *h_sample;
h_sample = (int*) malloc(num*sizeof(int));
int *d_sample;
cudaMalloc((void**)&d_sample, num*sizeof(float));
initCurand<<<(int)num/32 + 1, 32>>>(devState, 1);
test<<<(int)num/32 + 1, 32>>>(d_sample, devState);
cudaMemcpy(h_sample, d_sample, num*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < num; ++i)
{
cout << *(h_sample + i) << endl;
}
//free memory
cudaFree(devState);
free(h_sample);
cudaFree(d_sample);
return 0;
}
I have just started to learn CUDA, so if my way of accessing global memory is incorrect, please help me with that. Thanks.
This is launching "extra" threads:
initCurand<<<(int)num/32 + 1, 32>>>(devState, 1);
num is 100, so the above config will launch 4 blocks of 32 threads each, i.e. 128 threads. But you are only allocating space for 100 curandState here:
cudaMalloc((void**)&devState, num*sizeof(curandState));
So your initCurand kernel will have some threads (idx = 100-127) that are attempting to initialize some curandState that you haven't allocated. As a result when you run cuda-memcheck which does fairly rigorous out-of-bounds checking, an error is reported.
One possible solution would be to modify your initCurand kernel as follows:
__global__ void initCurand(curandState *state, unsigned long seed, int num){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < num)
curand_init(seed, idx, 0, &state[idx]);
}
This will prevent any out-of-bounds threads from doing anything. Note that you will need to modify the kernel call to pass num to it. Also, it appears to me you have a similar problem in your test kernel; you may want to do something similar to fix it there (see the sketch below). This is a common construct in CUDA kernels; I call it a "thread check". You can find other questions on the SO cuda tag discussing this same concept.
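Applied to your test kernel, the same thread check would look roughly like this (a sketch; num here is the existing #define, or you could pass it as a parameter as above):
__global__ void test(int *dev_sample, curandState *state){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < num){   // thread check: only the first num threads touch weights/state
        weights[idx] = curand_uniform(&state[idx]);
        sampling(weights, 1, dev_sample+idx, &state[idx]);
    }
}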
I have a problem! I need to initialize a constant global array in CUDA C. To initialize the array I need to use a for loop. I need to do this because I have to use this array in some kernels, and my professor told me to define it as a constant visible only on the device.
How can I do this?
I want to do something like this:
#include <stdio.h>
#include <math.h>
#define N 8
__constant__ double H[N*N];
__global__ void prodotto(double *v, double *w){
int k=threadIdx.x+blockDim.x*blockIdx.x;
w[k]=0;
for(int i=0;i<N;i++) w[k]=w[k]+H[k*N+i]*v[i];
}
int main(){
double v[8]={1, 1, 1, 1, 1, 1, 1, 1};
double *dev_v, *dev_w, *w;
double *host_H;
host_H=(double*)malloc((N*N)*sizeof(double));
cudaMalloc((void**)&dev_v,sizeof(double));
cudaMalloc((void**)&dev_w,sizeof(double));
for(int k=0;k<N;k++){
host_H[2*N*k+2*k]=1/1.414;
host_H[2*N*k+2*k+1]=1/1.414;
host_H[(2*k+1)*N+2*k]=1/1.414;
host_H[(2*k+1)+2*k+1]=-1/1.414;
}
cudaMemcpyToSymbol(H, host_H, (N*N)*sizeof(double));
cudaMemcpy(dev_v, v, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_w, w, N*sizeof(double), cudaMemcpyHostToDevice);
prodotto<<<1,N>>>(dev_v, dev_w);
cudaMemcpy(v, dev_v, N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(w, dev_w, N*sizeof(double), cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++) printf("\n%f %f", v[i], w[i]);
return 0;
}
But the output is an array of zeros... I want the output array to be filled with the product of the matrix H (here stored as an array) and the vector v.
Thanks!
Something like this should work:
#define DSIZE 32
__constant__ int mydata[DSIZE];
int main(){
...
int *h_mydata;
h_mydata = new int[DSIZE];
for (int i = 0; i < DSIZE; i++)
h_mydata[i] = ....; // initialize however you wish
cudaMemcpyToSymbol(mydata, h_mydata, DSIZE*sizeof(int));
...
}
Not difficult. You can then use the __constant__ data directly in a kernel:
__global__ void mykernel(...){
...
int myval = mydata[threadIdx.x];
...
}
You can read about __constant__ variables in the programming guide. __constant__ variables are read-only from the perspective of device code (kernel code). But from the host, they can be read from or written to using the cudaMemcpyToSymbol/cudaMemcpyFromSymbol API.
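As an aside, copying a __constant__ array back to the host uses the same symbol API; a minimal sketch with the names above:
int h_check[DSIZE];
cudaMemcpyFromSymbol(h_check, mydata, DSIZE*sizeof(int));  // device -> host copy of the symbol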
EDIT: Based on the code you've now posted, there were at least 2 errors:
Your allocation sizes for dev_v and dev_w were not correct.
You had no host allocation for w.
The following code seems to work correctly for me with those 2 fixes:
$ cat t579.cu
#include <stdio.h>
#include <math.h>
#define N 8
__constant__ double H[N*N];
__global__ void prodotto(double *v, double *w){
int k=threadIdx.x+blockDim.x*blockIdx.x;
w[k]=0;
for(int i=0;i<N;i++) w[k]=w[k]+H[k*N+i]*v[i];
}
int main(){
double v[N]={1, 1, 1, 1, 1, 1, 1, 1};
double *dev_v, *dev_w, *w;
double *host_H;
host_H=(double*)malloc((N*N)*sizeof(double));
w =(double*)malloc( (N)*sizeof(double));
cudaMalloc((void**)&dev_v,N*sizeof(double));
cudaMalloc((void**)&dev_w,N*sizeof(double));
for(int k=0;k<N;k++){
host_H[2*N*k+2*k]=1/1.414;
host_H[2*N*k+2*k+1]=1/1.414;
host_H[(2*k+1)*N+2*k]=1/1.414;
host_H[(2*k+1)+2*k+1]=-1/1.414;
}
cudaMemcpyToSymbol(H, host_H, (N*N)*sizeof(double));
cudaMemcpy(dev_v, v, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_w, w, N*sizeof(double), cudaMemcpyHostToDevice);
prodotto<<<1,N>>>(dev_v, dev_w);
cudaMemcpy(v, dev_v, N*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(w, dev_w, N*sizeof(double), cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++) printf("\n%f %f", v[i], w[i]);
printf("\n");
return 0;
}
$ nvcc -arch=sm_20 -o t579 t579.cu
$ cuda-memcheck ./t579
========= CUDA-MEMCHECK
1.000000 0.000000
1.000000 -0.707214
1.000000 -0.707214
1.000000 -1.414427
1.000000 1.414427
1.000000 0.707214
1.000000 1.414427
1.000000 0.707214
========= ERROR SUMMARY: 0 errors
$
A few notes:
Any time you're having trouble with a CUDA code, it's good practice to use proper cuda error checking.
You can run your code with cuda-memcheck (just as I have above) to get a quick read of whether any CUDA errors are encountered.
I've not verified the numerical results or worked through the math. If it's not what you wanted, I assume you can sort it out.
I've not made any changes to your code other than what seemed sensible to me to fix the obvious errors and make the results presentable for educational purposes. Certainly there can be discussions about preferred allocation methods, printf vs. cout, and what have you. I'm focused primarily on CUDA topics in this answer.