Generating white noise using CUDA on GPU

I want to generate white noise (normal distribution) using CUDA. Below is my attempt.
#define SCALE 1.0
#define SHIFT 0.0
#define BLOCKS 64
#define THREADS 64

__global__ void setup_kernel(curandState *state)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(7 + id, id, 0, &state[id]);
}

__global__ void generate_normal_kernel(curandState *state, int *result)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float x;
    curandState localState = state[id];
    for (int n = 0; n < 100000; n++) {
        x = (curand_normal(&localState) * SCALE) + SHIFT;
    }
    state[id] = localState;
    result[id] = (int)x;
}

int main(int argc, char *argv[])
{
    int i;
    unsigned int total;
    curandState *devStates;
    int *devResults, *hostResults;
    int device;
    struct cudaDeviceProp properties;

    CUDA_CALL(cudaGetDevice(&device));
    CUDA_CALL(cudaGetDeviceProperties(&properties, device));

    hostResults = (int *)calloc(THREADS * BLOCKS, sizeof(int));
    CUDA_CALL(cudaMalloc((void **)&devResults, BLOCKS * THREADS * sizeof(int)));
    CUDA_CALL(cudaMemset(devResults, 0, THREADS * BLOCKS * sizeof(int)));
    CUDA_CALL(cudaMalloc((void **)&devStates, THREADS * BLOCKS * sizeof(curandState)));

    setup_kernel<<<BLOCKS, THREADS>>>(devStates);
    generate_normal_kernel<<<BLOCKS, THREADS>>>(devStates, devResults);

    CUDA_CALL(cudaMemcpy(hostResults, devResults, BLOCKS * THREADS * sizeof(int), cudaMemcpyDeviceToHost));

    I_TCS = ITCSAmp * hostResults;

    /* Cleanup */
    CUDA_CALL(cudaFree(devStates));
    CUDA_CALL(cudaFree(devResults));
    free(hostResults);
    return EXIT_SUCCESS;
}
===============================================================================
But I got the following errors,
error: identifier "CUDA_CALL" is undefined
error: expression must have arithmetic or enum type
error: expression must have arithmetic or enum type
error: expression must have arithmetic or enum type
warning: variable "total" was declared but never referenced
error: identifier "devStates" is undefined
error: identifier "CUDA_CALL" is undefined
error: identifier "devResults" is undefined
error: identifier "hostResults" is undefined
I thought I had defined them already, but obviously that didn't work. If you have any suggestions or know how I might change the code, I would be really thankful for your help!

Please find below compilable and executable code that generates normally distributed random numbers in CUDA. It is a modification of the code you posted above; some of the changed instructions are left commented out in their old form.
I have changed CUDA_CALL to gpuErrchk, following What is the canonical way to check for errors using the CUDA runtime API?.
I think you have misinterpreted the curand_init syntax, so I have fixed it. Also, the setup_kernel kernel did not take a seed parameter, so I have added one.
I have simplified your generate_normal_kernel kernel: I believe the for loop that repeatedly recalculates x is unneeded.
curand_normal returns floats, not ints, and indeed a normal distribution over integers is not well defined. I have changed the relevant variable types accordingly.
#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>

#define SCALE 1.0f
#define SHIFT 0.0f
#define BLOCKS 64
#define THREADS 64

/***********************/
/* CUDA ERROR CHECKING */
/***********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/*************************/
/* CURAND INITIALIZATION */
/*************************/
__global__ void setup_kernel(unsigned long seed, curandState *state)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed, id, 0, &state[id]);
    // curand_init(7+id, id, 0, &state[id]);
}

/*****************************************/
/* RANDOM DISTRIBUTION GENERATION KERNEL */
/*****************************************/
__global__ void generate_normal_kernel(curandState *state, float *result)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    result[id] = (curand_normal(&state[id]) * SCALE) + SHIFT;
}

/********/
/* MAIN */
/********/
int main()
{
    float *hostResults = (float *)calloc(THREADS * BLOCKS, sizeof(float));

    float *devResults;      gpuErrchk(cudaMalloc((void **)&devResults, BLOCKS * THREADS * sizeof(float)));
    gpuErrchk(cudaMemset(devResults, 0, THREADS * BLOCKS * sizeof(float)));

    curandState *devStates; gpuErrchk(cudaMalloc((void **)&devStates, THREADS * BLOCKS * sizeof(curandState)));

    setup_kernel<<<BLOCKS, THREADS>>>(time(NULL), devStates);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    generate_normal_kernel<<<BLOCKS, THREADS>>>(devStates, devResults);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(hostResults, devResults, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost));

    for (int i = 0; i < THREADS * BLOCKS; i++) printf("rand[%i] = %f\n", i, hostResults[i]);

    /* Cleanup */
    gpuErrchk(cudaFree(devStates));
    gpuErrchk(cudaFree(devResults));
    free(hostResults);

    getchar();
    return 0;
}
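If you actually need many normal deviates per thread, as in your original loop, here is a minimal sketch of a variant kernel that fills a larger output array with a grid-stride loop. Nsamples and the size of result are assumptions not present in the code above; the kernel is only an illustration, not part of the tested program.

// Hypothetical variant: fill an array of Nsamples normal deviates using a grid-stride loop.
// The state array is the same one initialized by setup_kernel above.
__global__ void generate_normal_array_kernel(curandState *state, float *result, int Nsamples)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curandState localState = state[id];                 // work on a register copy of the state
    for (int i = id; i < Nsamples; i += blockDim.x * gridDim.x) {
        result[i] = (curand_normal(&localState) * SCALE) + SHIFT;
    }
    state[id] = localState;                             // write the advanced state back
}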

Related

invalid device ordinal on cudaMemPrefetchAsync

I'm running a toy CUDA sample on my GeForce 1080 Ti (Pascal) on Windows 10 with CUDA 9.2.
The goal is to test cudaMemPrefetchAsync to the CPU, as it is supposed to work.
However, I get a CUDA error (invalid device ordinal) on this particular line.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>
void fill(int* a, int val, int N) {
for (int k = 0; k < N; ++k) {
a[k] = val;
}
}
__global__ void add(int* a, int* b, int N)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += blockDim.x * gridDim.x) {
a[i] += b[i];
}
}
inline void check(cudaError_t err, const char* file, int line) {
if (err != cudaSuccess) {
::fprintf(stderr, "ERROR at %s[%d] : %s\n", file, line, cudaGetErrorString(err));
abort();
}
}
#define CUDA_CHECK(err) do { check(err, __FILE__, __LINE__); } while(0)
int main()
{
int deviceId;
CUDA_CHECK(cudaGetDevice(&deviceId));
const int N = 1024*1024*32;
int *a, *b;
CUDA_CHECK(cudaMallocManaged(&a, N * sizeof(int)));
CUDA_CHECK(cudaMallocManaged(&b, N * sizeof(int)));
CUDA_CHECK(cudaMemPrefetchAsync(a, N * sizeof(int), cudaCpuDeviceId)); // program breaks here
CUDA_CHECK(cudaMemPrefetchAsync(b, N * sizeof(int), cudaCpuDeviceId));
fill(a, 1, N);
fill(a, 2, N);
CUDA_CHECK(cudaMemPrefetchAsync(a, N * sizeof(int), deviceId));
CUDA_CHECK(cudaMemPrefetchAsync(b, N * sizeof(int), deviceId));
add<<<32, 256>>>(a, b, N);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return 0;
}
Is that a hardware/driver/OS limitation? Can I simply ignore the error?
Is that a hardware/driver/OS limitation?
Yes, the latter. Quoting from the documentation:
GPUs with SM architecture 6.x or higher (Pascal class or newer)
provide additional Unified Memory features such as on-demand page
migration and GPU memory oversubscription that are outlined throughout
this document. Note that currently these features are only supported
on Linux operating systems.
So asynchronous page migration is not supported on Windows at the moment, and that is why you get an error when you try to enable it.
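If you want the program to degrade gracefully on platforms without these Unified Memory features, one possibility (a sketch only, not part of the original answer) is to query cudaDevAttrConcurrentManagedAccess and skip the CPU prefetch when the attribute is zero. The snippet reuses CUDA_CHECK, deviceId, a, b and N from the question's code.

// Sketch: only prefetch to the CPU on platforms where on-demand page migration
// is available (e.g. not on Windows/WDDM). The attribute query is standard runtime API.
int concurrentManagedAccess = 0;
CUDA_CHECK(cudaDeviceGetAttribute(&concurrentManagedAccess,
                                  cudaDevAttrConcurrentManagedAccess, deviceId));
if (concurrentManagedAccess) {
    CUDA_CHECK(cudaMemPrefetchAsync(a, N * sizeof(int), cudaCpuDeviceId));
    CUDA_CHECK(cudaMemPrefetchAsync(b, N * sizeof(int), cudaCpuDeviceId));
}
// else: let the driver migrate pages on demand / on first access instead.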

Compare Thrust fill with kernel launch speed [duplicate]

I want to add 128-bit vectors with carry. My 128-bit version (addKernel128 in the code below) is twice as slow as the basic 32-bit version (addKernel32 below).
Do I have memory coalescing problems? How can I get better performance?
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#define UADDO(c, a, b) asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
#define UADDC(c, a, b) asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
__global__ void addKernel32(unsigned int *c, const unsigned int *a, const unsigned int *b, const int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void addKernel128(unsigned *c, const unsigned *a, const unsigned *b, const int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size / 4)
{
uint4 a4 = ((const uint4 *)a)[tid],
b4 = ((const uint4 *)b)[tid],
c4;
UADDO(c4.x, a4.x, b4.x)
UADDC(c4.y, a4.y, b4.y) // add with carry
UADDC(c4.z, a4.z, b4.z) // add with carry
UADDC(c4.w, a4.w, b4.w) // add with carry (no overflow checking for clarity)
((uint4 *)c)[tid] = c4;
tid += blockDim.x * gridDim.x;
}
}
int main()
{
const int size = 10000000; // 10 million
unsigned int *d_a, *d_b, *d_c;
cudaMalloc((void**)&d_a, size * sizeof(int));
cudaMalloc((void**)&d_b, size * sizeof(int));
cudaMalloc((void**)&d_c, size * sizeof(int));
cudaMemset(d_a, 1, size * sizeof(int)); // dummy init just for the example
cudaMemset(d_b, 2, size * sizeof(int)); // dummy init just for the example
cudaMemset(d_c, 0, size * sizeof(int));
int nbThreads = 512;
int nbBlocks = 1024; // for example
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float m = 0;
cudaEventElapsedTime(&m, start, stop);
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
cudaDeviceReset();
printf("Elapsed = %g\n", m);
return 0;
}
Timing CUDA code on a WDDM GPU can be quite difficult for a variety of reasons. Most of these revolve around the fact that the GPU is being managed as a display device by Windows, and this can introduce a variety of artifacts into the timing. One example is that the Windows driver and WDDM will batch work for the GPU, and may interleave display work in the middle of CUDA GPU work.
If possible, time your CUDA code on Linux, or else on a Windows GPU in TCC mode.
For performance, always build without the -G switch. In Visual Studio, this usually corresponds to building the release version of the project rather than the debug version.
To get a good performance comparison, it's usually advisable to do some "warm-up runs" before actually measuring the timing results (see the sketch below). These will eliminate "start-up" and other one-time measurement issues, and you are more likely to get sensible results. You may also wish to run your code a number of times and average the results.
It's also usually advisable to compile with an arch flag that corresponds to your GPU, for example -arch=sm_20 for a cc2.0 GPU.
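As an illustration of the warm-up and averaging advice, the timing section of main could be structured roughly as follows. This is only a sketch: nRepeats is an arbitrary choice, and d_a, d_b, d_c, size, nbBlocks and nbThreads are the variables from the question's code.

// Sketch: one untimed warm-up launch, then an averaged timing of addKernel128.
const int nRepeats = 10;

addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);   // warm-up run, not timed
cudaDeviceSynchronize();

cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
for (int r = 0; r < nRepeats; ++r) {
    addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);

float msTotal = 0.0f;
cudaEventElapsedTime(&msTotal, start, stop);
printf("Average elapsed per launch = %g ms\n", msTotal / nRepeats);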

invalid device symbol cudaMemcpyFromSymbol CUDA

I want to calculate the sum of all elements of an array in CUDA. I came up with this code. It compiles without any error, but the result is always zero. I get an invalid device symbol error from cudaMemcpyFromSymbol. I cannot use any libraries like Thrust or cuBLAS.
#define TRIALS_PER_THREAD 4096
#define NUM_BLOCKS 256
#define NUM_THREADS 256

double *dev;
__device__ volatile double pi_gpu = 0;

__global__ void ArraySum(double *array)
{
    unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    pi_gpu = pi_gpu + array[tid];
    __syncthreads();
}

int main(int argc, char *argv[]) {
    cudaMalloc((void **)&dev, NUM_BLOCKS * NUM_THREADS * sizeof(double));

    double pi_gpu_h;
    ArraySum<<<NUM_BLOCKS, NUM_THREADS>>>(dev);
    cudaDeviceSynchronize();

    cudaError err = cudaMemcpyFromSymbol(&pi_gpu_h, &pi_gpu, sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaSuccess != err)
    {
        fprintf(stderr, "cudaMemcpyFromSymbolfailed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return pi_gpu_h; // this is always zero!!!
}
The symbol argument in the copy-from-symbol call is incorrect: you should pass the symbol itself, not its address. The call should look like this:
cudaMemcpyFromSymbol(&pi_gpu_h, pi_gpu, sizeof(double), 0, cudaMemcpyDeviceToHost)
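For reference, here is a minimal, self-contained sketch of the pattern of copying a __device__ variable back to the host. The symbol name d_sum and the kernel set_value are purely illustrative, not taken from your program.

// Minimal sketch: copy a __device__ variable back to the host by passing the
// symbol itself (not its address) to cudaMemcpyFromSymbol.
#include <cstdio>

__device__ double d_sum = 0.0;

__global__ void set_value()
{
    d_sum = 42.0;   // illustrative write to the device symbol
}

int main()
{
    set_value<<<1, 1>>>();
    cudaDeviceSynchronize();

    double h_sum = 0.0;
    cudaError_t err = cudaMemcpyFromSymbol(&h_sum, d_sum, sizeof(double), 0, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMemcpyFromSymbol failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("h_sum = %f\n", h_sum);   // expected: 42.000000
    return 0;
}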

Why am I seeing a black screen when I try this code in CUDA?

I'm using Windows 8 and Nsight in Visual Studio 2010, and I installed the "310.90-notebook-win8-win7-winvista-32bit-international-whql" driver for my graphics card (9300M GS). But when I try the code below, I see a black screen and an error: "Display driver stopped responding and has recovered".
I know that the problem is with cudaMemcpy, but I don't know why.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 8
__global__ void kernel(int *a)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int step = x;
while(step<N){
a[step] = threadIdx.x;
step += x;
}
}
int main()
{
int a[N],i=N,j=0;
for(;j<N;j++)
a[j]=i--;
int *dev_a;
cudaMalloc( (void**)&dev_a, N * sizeof(int) );
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
kernel<<<2,2>>>(dev_a);
cudaError_t cudaStatus = cudaMemcpy(a, dev_a,N-1 * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
//goto Error;
}
for(j=0;j<N;j++)printf("\n%d",a[j]);
int t;
scanf("%d",&t);
}
In the kernel, the thread with threadIdx.x = 0 and blockIdx.x = 0 i.e. the first thread of the first block will run indefinitely, causing the kernel to crash.
When threadIdx.x = 0 and blockIdx.x = 0 the kernel code will become:
int x = 0;
int step = 0;
while (step < N)
{
    a[step] = 0;
    step += 0; // This will create an infinite loop
}
Also (maybe it's a typo), there is a logical error in the following line of your code:
cudaError_t cudaStatus = cudaMemcpy(a, dev_a,N-1 * sizeof(int), cudaMemcpyDeviceToHost);
Considering operator precedence in C, the expression N-1 * sizeof(int) evaluates to N - 4 (if sizeof(int) is 4), not to (N-1) * sizeof(int).
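One possible fix, sketched below, is to rewrite the loop as a standard grid-stride loop so the stride can never be zero, and to parenthesize the copy size so that sizeof(int) multiplies the whole element count (or simply copy all N elements):

// Sketch of a fix: use the full grid size as the stride so it is never zero.
__global__ void kernel_fixed(int *a)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    for (int step = x; step < N; step += blockDim.x * gridDim.x) {
        a[step] = threadIdx.x;
    }
}

// ...and in main(), parenthesize the size expression:
// cudaError_t cudaStatus = cudaMemcpy(a, dev_a, (N - 1) * sizeof(int), cudaMemcpyDeviceToHost);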

Performance of static versus dynamic CUDA shared memory allocation

I have 2 kernels that do exactly the same thing. One of them allocates shared memory statically, while the other allocates the memory dynamically at run time. I am using the shared memory as a 2D array, so for the dynamic allocation I have a macro that computes the memory location. Now, the results generated by the 2 kernels are exactly the same. However, the timing results differ by a factor of 3: the static memory allocation is much faster. I am sorry that I can't post any of my code. Can someone give a justification for this?
I have no evidence that static shared memory allocation is faster than dynamic shared memory allocation. As noted in the comments above, it would be impossible to answer your question without a reproducer. In at least the case of the code below, the timings of the same kernel, when run with static or dynamic shared memory allocation, are exactly the same:
#include <cuda.h>
#include <stdio.h>

#define BLOCK_SIZE 512

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/***********************************/
/* SHARED MEMORY STATIC ALLOCATION */
/***********************************/
__global__ void kernel_static_memory_allocation(int *d_inout, int N)
{
    __shared__ int s[BLOCK_SIZE];

    const int tid = threadIdx.x;
    const int i   = blockIdx.x * blockDim.x + threadIdx.x;

    if (i < N) {
        s[tid] = d_inout[i];
        __syncthreads();
        s[tid] = s[tid] * s[tid];
        __syncthreads();
        d_inout[i] = s[tid];
    }
}

/************************************/
/* SHARED MEMORY DYNAMIC ALLOCATION */
/************************************/
__global__ void kernel_dynamic_memory_allocation(int *d_inout, int N)
{
    extern __shared__ int s[];

    const int tid = threadIdx.x;
    const int i   = blockIdx.x * blockDim.x + threadIdx.x;

    if (i < N) {
        s[tid] = d_inout[i];
        __syncthreads();
        s[tid] = s[tid] * s[tid];
        __syncthreads();
        d_inout[i] = s[tid];
    }
}

/********/
/* MAIN */
/********/
int main(void)
{
    int N = 1000000;

    int* a = (int*)malloc(N*sizeof(int));
    for (int i = 0; i < N; i++) { a[i] = i; }

    int *d_inout; gpuErrchk(cudaMalloc(&d_inout, N * sizeof(int)));

    int n_blocks = N/BLOCK_SIZE + (N%BLOCK_SIZE == 0 ? 0 : 1);

    gpuErrchk(cudaMemcpy(d_inout, a, N*sizeof(int), cudaMemcpyHostToDevice));

    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    kernel_static_memory_allocation<<<n_blocks,BLOCK_SIZE>>>(d_inout, N);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Static allocation - elapsed time:  %3.3f ms \n", time);

    cudaEventRecord(start, 0);
    kernel_dynamic_memory_allocation<<<n_blocks,BLOCK_SIZE,BLOCK_SIZE*sizeof(int)>>>(d_inout, N);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Dynamic allocation - elapsed time: %3.3f ms \n", time);
}
The likely reason is that the disassembled codes for the two kernels are exactly the same, and they do not change even when int N = 1000000; is replaced with int N = rand();.