cudaMemcpy doesn't work in 64-bit builds - cuda

I made a very simple CUDA kernel which populates an array of 100 elements with f[i]=i (and checked using assert in another kernel that it had indeed done so).
#include <stdio.h>
#include <assert.h>

// definition of gpuErrchk

__global__ void setToItself(int* vect){
    vect[threadIdx.x] = threadIdx.x;
}

int main(){
    int* a_d;
    gpuErrchk( cudaMalloc(&a_d, 100 * sizeof(int)) );

    setToItself<<<1,100>>>(a_d);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
I then copy the array back to the host for display using:
    int* a_h[100];
    gpuErrchk( cudaMemcpy(a_h, a_d, 100 * sizeof(int), cudaMemcpyDeviceToHost) );

    for (int i = 0; i < 100; i++)
        printf("%d %d\n", i, a_h[i]);

    system("PAUSE");
}
When I compile it as 32-bit it works fine, but in a 64-bit build I get
f[0]=0
f[1]=2
f[2]=4
...
and the second half of the array is undefined.
I tried changing the compute_ and sm_ settings back from 35 to 20, as was the default for the 32-bit build, but to no avail.
I tried adding error checking everywhere, but I got no output, meaning all functions returned success.
I searched, but there don't seem to be any particular problems associated with the 64-bit transition. What is going on here?

This is not correct:
int* a_h[100];
It should be:
int a_h[100];
You want an array of 100 int values, not an array of 100 int pointers. int values don't change size between 32-bit and 64-bit builds; int pointers do.
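For reference, a minimal corrected sketch of the copy-back and print step, assuming the same gpuErrchk macro from the question:

int a_h[100];   // 100 int values on the host; the same size in 32-bit and 64-bit builds
gpuErrchk( cudaMemcpy(a_h, a_d, 100 * sizeof(int), cudaMemcpyDeviceToHost) );

for (int i = 0; i < 100; i++)
    printf("%d %d\n", i, a_h[i]);   // now prints i twice, since a_h[i] == i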

Related

Why doesn't the console show the entire output? [duplicate]

To test the printf() call on the device, I wrote a simple program which copies an array of moderate size to the device and prints the values of the device array to the screen. Although the array is correctly copied to the device, the printf() function does not work correctly; it loses the first several hundred numbers. The array size in the code is 4096. Is this a bug, or am I not using this function properly? Thanks in advance.
EDIT: My GPU is a GeForce GTX 550i, with compute capability 2.1
My code:
#include <stdio.h>
#include <stdlib.h>

#define N 4096

__global__ void Printcell(float *d_Array, int n){
    int k = 0;
    printf("\n=========== data of d_Array on device==============\n");
    for( k = 0; k < n; k++ ){
        printf("%f ", d_Array[k]);
        if((k+1)%6 == 0) printf("\n");
    }
    printf("\n\nTotally %d elements has been printed", k);
}

int main(){
    int i = 0;
    float Array[N] = {0}, rArray[N] = {0};
    float *d_Array;

    for(i = 0; i < N; i++)
        Array[i] = i;

    cudaMalloc((void**)&d_Array, N*sizeof(float));
    cudaMemcpy(d_Array, Array, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    Printcell<<<1,1>>>(d_Array, N);   // Print the device array by a kernel
    cudaDeviceSynchronize();

    /* Copy the device array back to host to see if it was correctly copied */
    cudaMemcpy(rArray, d_Array, N*sizeof(float), cudaMemcpyDeviceToHost);
    printf("\n\n");
    for(i = 0; i < N; i++){
        printf("%f ", rArray[i]);
        if((i+1)%6 == 0) printf("\n");
    }
}
printf from the device has a limited queue. It's intended for small-scale, debug-style output, not large-scale output.
Referring to the programming guide:
The output buffer for printf() is set to a fixed size before kernel launch (see Associated Host-Side API). It is circular and if more output is produced during kernel execution than can fit in the buffer, older output is overwritten.
Your in-kernel printf output overran the buffer, and so the first printed elements were lost (overwritten) before the buffer was dumped into the standard I/O queue.
The linked documentation also indicates that the buffer size can be increased.
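As a hedged sketch of that adjustment (the 8 MB figure here is just an illustrative choice, not a recommendation), the printf FIFO can be resized on the host before the kernel launch:

// Enlarge the device-side printf buffer before launching the kernel.
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 8 * 1024 * 1024);   // request an 8 MB FIFO

Printcell<<<1,1>>>(d_Array, N);
cudaDeviceSynchronize();   // the printf buffer is flushed to the host here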

GTX 1080 Floating Point Performance Calculation [duplicate]

I'm trying to reach the peak performance of each SM with the code below. The peak is somewhere around 25 GFlops per SM (GTX 275, GT200 arch.). This code gives 8 GFlops at the most.
__global__ void new_ker(float *x)
{
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    float a, b;
    a = 0;
    b = x[index];
    // LOOP = 10000000
    // No. of blocks = 1
    // Threads per block = 512 (I'm using GTX 275 - GT200 Arch.)
    #pragma unroll 2048
    for(int i = 0; i < LOOP; i++){
        a = a*b + b;
    }
    x[index] = a;
}
I don't want to increase ILP in the code. Any ideas why it's not reaching peak?
int main(int argc, char **argv)
{
    // Initializations
    float *x;
    float *dx;
    cudaEvent_t new_start, new_stop;
    float elapsed;
    double gflops;
    x = 0;
    flag = 0;

    cudaMalloc((void **)&dx, sizeof(float)*THPB);
    // ILP=1
    cudaEventCreate(&new_start);
    cudaEventCreate(&new_stop);
    printf("Kernel1:\n");

    cudaEventRecord(new_start, 0);
    new_ker<<<BLOCKS,THPB>>>(dx);
    cudaEventRecord(new_stop, 0);
    cudaEventSynchronize(new_stop);
    cudaEventElapsedTime(&elapsed, new_start, new_stop);

    x = (float *)malloc(sizeof(float)*THPB);
    cudaMemcpy(x, dx, sizeof(float)*THPB, cudaMemcpyDeviceToHost);

    gflops = ((double)(BLOCKS)*(THPB)*LOOP/elapsed)/1000000;
    printf("\t%f", gflops);

    cudaEventDestroy(new_start);
    cudaEventDestroy(new_stop);
    return 0;
}
Platform:
CUDA 3.0
NVIDIA GeForce GTX275 (GT200)
If I put together a complete repro case from your code, using the correct FLOP calculation:
#include <stdio.h>

#define LOOP (10000000)
#define BLOCKS (30)
#define THPB (512)

__global__ void new_ker(float *x)
{
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    float a, b;
    a = 0;
    b = x[index];
    #pragma unroll 2048
    for(int i = 0; i < LOOP; i++){
        a = a*b + b;
    }
    x[index] = a;
}

int main(int argc, char **argv)
{
    // Initializations
    float *x;
    float *dx;
    cudaEvent_t new_start, new_stop;
    float elapsed;
    double gflops;
    x = 0;

    cudaMalloc((void **)&dx, sizeof(float)*THPB);
    // ILP=1
    cudaEventCreate(&new_start);
    cudaEventCreate(&new_stop);
    printf("Kernel1:\n");

    cudaEventRecord(new_start, 0);
    new_ker<<<BLOCKS,THPB>>>(dx);
    cudaEventRecord(new_stop, 0);
    cudaEventSynchronize(new_stop);
    cudaEventElapsedTime(&elapsed, new_start, new_stop);

    x = (float *)malloc(sizeof(float)*THPB*BLOCKS);
    cudaMemcpy(x, dx, sizeof(float)*THPB*BLOCKS, cudaMemcpyDeviceToHost);

    gflops = 2.0e-6 * ((double)(LOOP)*double(THPB*BLOCKS)/(double)elapsed);
    printf("\t%f\n", gflops);

    cudaEventDestroy(new_start);
    cudaEventDestroy(new_stop);
    return 0;
}
and compile and run it on a 1.4GHz GTX 275 with CUDA 3.2 on a 64-bit Linux platform:
$ nvcc -arch=sm_13 -Xptxas="-v" -o perf perf.cu
ptxas info : Compiling entry function '_Z7new_kerPf' for 'sm_13'
ptxas info : Used 4 registers, 8+16 bytes smem, 8 bytes cmem[1]
$ ./perf
Kernel1:
671.806039
I get within 0.01% of peak FLOP/s for that card running a pure FMAD code (1.4 GHz * 2 FLOP * 8 cores/MP * 30 MP = 672 GFLOP/s).
So it seems that the code does, in fact, hit peak FLOP/s with one block per multiprocessor, but you just are not calculating the FLOP/s number correctly.
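To make that arithmetic explicit, here is a small host-side sketch of both numbers, reusing the LOOP, THPB, BLOCKS and elapsed values from the code above (the clock rate is the 1.4 GHz figure quoted for this card):

// Theoretical peak: clock (GHz) * 2 FLOP per FMAD * 8 SP cores per MP * 30 MPs.
double peak_gflops = 1.4 * 2.0 * 8.0 * 30.0;   // = 672 GFLOP/s

// Measured rate: each thread issues LOOP FMADs, i.e. 2*LOOP FLOPs.
// elapsed is in milliseconds, hence the 2.0e-6 factor to land in GFLOP/s.
double measured_gflops = 2.0e-6 * (double)LOOP * (double)(THPB * BLOCKS) / (double)elapsed;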

CUDA kernel call in a simple sample

This is the first parallel code example from CUDA by Example.
Can anyone explain the kernel call <<< N, 1 >>> to me?
This is the code, with the important points:
#define N 10

__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x;   // this thread handles the data at its thread id
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

int main( void ) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // allocate the memory on the GPU
    // fill the arrays 'a' and 'b' on the CPU
    // copy the arrays 'a' and 'b' to the GPU

    add<<<N,1>>>( dev_a, dev_b, dev_c );

    // copy the array 'c' back from the GPU to the CPU
    // display the results
    // free the memory allocated on the GPU
    return 0;
}
Why does it use <<< N, 1 >>>, i.e. N blocks with 1 thread in each block? Couldn't we write <<< 1, N >>> instead and use 1 block with N threads, which would seem better optimized?
For this little example, there is no particular reason (as Bart already told you in the comments). But for a larger, more realistic example you should always keep in mind that the number of threads per block is limited. That is, if you use N = 10000, you could not use <<<1,N>>> anymore, but <<<N,1>>> would still work.
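In practice the two are usually combined. Below is a minimal sketch of the standard pattern for a large N, with the block size of 256 chosen arbitrarily for illustration:

#define N 1000000
#define THREADS_PER_BLOCK 256

__global__ void add(int *a, int *b, int *c) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;   // global index across all blocks
    if (tid < N)                                       // guard for the padded last block
        c[tid] = a[tid] + b[tid];
}

// Launch enough blocks of THREADS_PER_BLOCK threads to cover all N elements:
// add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_c);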


Simplest Possible Example to Show GPU Outperform CPU Using CUDA

I am looking for the most concise amount of code possible that can be coded both for a CPU (using g++) and a GPU (using nvcc) for which the GPU consistently outperforms the CPU. Any type of algorithm is acceptable.
To clarify: I'm literally looking for two short blocks of code, one for the CPU (using C++ in g++) and one for the GPU (using C++ in nvcc), for which the GPU outperforms the CPU. Preferably on the scale of seconds or milliseconds. The shortest code pair possible.
First off, I'll reiterate my comment: GPUs are high bandwidth, high latency. Trying to get the GPU to beat a CPU on a nanosecond job (or even a millisecond or second job) completely misses the point of using a GPU. Below is some simple code, but to really appreciate the performance benefits of the GPU, you'll need a big problem size to amortize the startup costs over... otherwise, it's meaningless. I can beat a Ferrari in a two-foot race, simply because it takes some time to turn the key, start the engine and push the pedal. That doesn't mean I'm faster than the Ferrari in any meaningful way.
Use something like this in C++:
#include <stdio.h>

#define N (1024*1024)
#define M (1000000)

int main()
{
    float data[N]; int count = 0;
    for(int i = 0; i < N; i++)
    {
        data[i] = 1.0f * i / N;
        for(int j = 0; j < M; j++)
        {
            data[i] = data[i] * data[i] - 0.25f;
        }
    }
    int sel;
    printf("Enter an index: ");
    scanf("%d", &sel);
    printf("data[%d] = %f\n", sel, data[sel]);
}
Use something like this in CUDA/C:
#include <stdio.h>

#define N (1024*1024)
#define M (1000000)

__global__ void cudakernel(float *buf)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    buf[i] = 1.0f * i / N;
    for(int j = 0; j < M; j++)
        buf[i] = buf[i] * buf[i] - 0.25f;
}

int main()
{
    float data[N]; int count = 0;
    float *d_data;

    cudaMalloc(&d_data, N * sizeof(float));
    cudakernel<<<N/256, 256>>>(d_data);
    cudaMemcpy(data, d_data, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_data);

    int sel;
    printf("Enter an index: ");
    scanf("%d", &sel);
    printf("data[%d] = %f\n", sel, data[sel]);
}
If that doesn't work, try making N and M bigger, or changing 256 to 128 or 512.
A very, very simple method would be to calculate the squares of, say, the first 100,000 integers, or a large matrix operation. It's easy to implement and lends itself to the GPU's strengths by avoiding branching, not requiring a stack, etc. I did this with OpenCL vs C++ a while back and got some pretty astonishing results. (A 2GB GTX 460 achieved about 40x the performance of a dual-core CPU.)
Are you looking for example code, or just ideas?
Edit
The 40x was vs a dual core CPU, not a quad core.
Some pointers:
Make sure you're not running, say, Crysis while running your benchmarks.
Shut down all unnecessary apps and services that might be stealing CPU time.
Make sure your kid doesn't start watching a movie on your PC while the benchmarks are running. Hardware MPEG decoding tends to influence the outcome. (Autoplay let my two year old start Despicable Me by inserting the disk. Yay.)
As I said in my comment response to @Paul R, consider using OpenCL as it'll easily let you run the same code on the GPU and CPU without having to reimplement it.
(These are probably pretty obvious in retrospect.)
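As a rough CUDA sketch of the "squares" idea mentioned above (the names and sizes here are arbitrary illustrations, not the OpenCL code actually used for the 40x figure):

#include <stdio.h>

#define COUNT 100000        // square the first 100,000 integers
#define THREADS 256

__global__ void squares(long long *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < COUNT)
        out[i] = (long long)i * i;   // 64-bit result; no branching beyond the bounds check
}

int main()
{
    long long *d_out;
    long long h_out[8];              // spot-check only the first few results on the host

    cudaMalloc(&d_out, COUNT * sizeof(long long));
    squares<<<(COUNT + THREADS - 1) / THREADS, THREADS>>>(d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    cudaFree(d_out);

    for (int i = 0; i < 8; i++)
        printf("%d^2 = %lld\n", i, h_out[i]);
    return 0;
}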
For reference, I made a similar example with time measurements. With a GTX 660, the GPU speedup was 24x, where the GPU time includes data transfers in addition to the actual computation.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#define N (1024*1024)
#define M (10000)
#define THREADS_PER_BLOCK 1024
void serial_add(double *a, double *b, double *c, int n, int m)
{
for(int index=0;index<n;index++)
{
for(int j=0;j<m;j++)
{
c[index] = a[index]*a[index] + b[index]*b[index];
}
}
}
__global__ void vector_add(double *a, double *b, double *c)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int j=0;j<M;j++)
{
c[index] = a[index]*a[index] + b[index]*b[index];
}
}
int main()
{
clock_t start,end;
double *a, *b, *c;
int size = N * sizeof( double );
a = (double *)malloc( size );
b = (double *)malloc( size );
c = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
start = clock();
serial_add(a, b, c, N, M);
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
end = clock();
float time1 = ((float)(end-start))/CLOCKS_PER_SEC;
printf("Serial: %f seconds\n",time1);
start = clock();
double *d_a, *d_b, *d_c;
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
vector_add<<< (N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, d_b, d_c );
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
free(a);
free(b);
free(c);
cudaFree( d_a );
cudaFree( d_b );
cudaFree( d_c );
end = clock();
float time2 = ((float)(end-start))/CLOCKS_PER_SEC;
printf("CUDA: %f seconds, Speedup: %f\n",time2, time1/time2);
return 0;
}
I agree with David's comments about OpenCL being a great way to test this, because of how easy it is to switch between running code on the CPU vs. GPU. If you're able to work on a Mac, Apple has a nice bit of sample code that does an N-body simulation using OpenCL, with kernels running on the CPU, GPU, or both. You can switch between them in real time, and the FPS count is displayed onscreen.
For a much simpler case, they have a "hello world" OpenCL command line application that calculates squares in a manner similar to what David describes. That could probably be ported to non-Mac platforms without much effort. To switch between GPU and CPU usage, I believe you just need to change the
int gpu = 1;
line in the hello.c source file to 0 for CPU, 1 for GPU.
Apple has some more OpenCL example code in their main Mac source code listing.
Dr. David Gohara had an example of OpenCL's GPU speedup when performing molecular dynamics calculations at the very end of this introductory video session on the topic (at around minute 34). In his calculation, he sees a roughly 27X speedup by going from a parallel implementation running on 8 CPU cores to a single GPU. Again, it's not the simplest of examples, but it shows a real-world application and the advantage of running certain calculations on the GPU.
I've also done some tinkering in the mobile space using OpenGL ES shaders to perform rudimentary calculations. I found that a simple color thresholding shader run across an image was roughly 14-28X faster when run as a shader on the GPU than the same calculation performed on the CPU for this particular device.