Why is my vectorized access kernel so slow? - cuda

I am trying to understand vectorized memory access and have implemented a simple example to evaluate its performance. However, I found that the vectorized kernel is slower than the naive one.
In the vectorized kernel, I recast the int pointer to an int2 pointer and then do the data copy.
This is the code I used:
#include <cuda_runtime.h>
#include <stdio.h>
#include <chrono>
#include <iostream>
void initData_int(int *p, int size){
for (int t=0; t<size; t++){
p[t] = (int)(rand()&0xff);
}
}
__global__ void naiveCopy(int *d_in, int *d_out, int size)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
for (int i = tid; i < size; i += blockDim.x*gridDim.x)
{
d_out[i] = d_in[i];
}
}
__global__ void vecCopy(int *d_in, int *d_out, int size)
{
int2* in = (int2*)d_in;
int2* out = (int2*)d_out;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
for (int i = tid; i < size/2; i += blockDim.x*gridDim.x)
{
out[i] = in[i];
}
if(tid==size/2 && size%2==1)
d_out[size-1] = d_in[size-1];
}
int main(int argc, char **argv)
{
int size = 1<<24;
//int size = 128;
int nBytes = size*sizeof(int);
int *d_h;
cudaMallocHost((int**)&d_h, nBytes);
initData_int(d_h, size);
//printData(d_h, size);
int *res = (int*)malloc(nBytes);
cudaStream_t stream;
cudaStreamCreate(&stream);
int *d_in, *d_out;
dim3 block(128, 1);
dim3 grid((size-1)/block.x+1, 1);
cudaMalloc((int**)&d_in, nBytes);
cudaMalloc((int**)&d_out, nBytes);
cudaMemcpyAsync(d_in, d_h, nBytes, cudaMemcpyHostToDevice, stream);
cudaStreamSynchronize(stream);
auto s_0 = std::chrono::system_clock::now();
naiveCopy<<<grid, block, 0, stream>>>(d_in, d_out, size);
cudaStreamSynchronize(stream);
auto e_0 = std::chrono::system_clock::now();
std::chrono::duration<double> diff = e_0 - s_0;
printf("Naive Kernel time cost is: %2f.\n", diff.count());
memset(res, 0, nBytes);
cudaMemset(d_out, 0, nBytes);
//vectorized access:
cudaStreamSynchronize(stream);
s_0 = std::chrono::system_clock::now();
vecCopy<<<grid, block, 0, stream>>>(d_in, d_out, size);
cudaStreamSynchronize(stream);
e_0 = std::chrono::system_clock::now();
diff = e_0 - s_0;
printf("Vectorized kernel time cost is: %2f.\n", diff.count());
cudaStreamDestroy(stream);
cudaFree(d_h);
cudaFree(d_in);
cudaFree(d_out);
free(res);
return 0;
}
This is the data from nvprof:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 89.28% 5.5024ms 1 5.5024ms 5.5024ms 5.5024ms [CUDA memcpy HtoD]
4.82% 296.94us 1 296.94us 296.94us 296.94us vecCopy(int*, int*, int)
3.99% 246.19us 1 246.19us 246.19us 246.19us naiveCopy(int*, int*, int)
Could you please explain what causes the performance degradation?

You are not doing a good job of grid sizing. Your grid dimensions might be sensible for the naive kernel:
dim3 grid((size-1)/block.x+1, 1);
But they are twice as large as they need to be for the vectorized copy kernel.
When I cut the grid size in half for the vectorized kernel (to match the methodology for the naive kernel):
dim3 grid2((size/2+block.x-1)/block.x);
then according to my testing, the vectorized copy kernel becomes faster:
3.88% 233.99us 1 233.99us 233.99us 233.99us naiveCopy(int*, int*, int)
2.84% 171.33us 1 171.33us 171.33us 171.33us vecCopy(int*, int*, int)
Notes:
cudaFree is not the correct API to use with cudaMallocHost. The correct API is cudaFreeHost.
We can probably do a better job of grid sizing, as was mentioned in the comments, by sizing the grid to match the GPU you are running on. However we don't need to take this step in order to demonstrate the improvement here.
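For reference, a sketch of what that GPU-matched grid sizing might look like using the CUDA occupancy API (an untested illustration, not part of the original code). Both kernels use grid-stride loops, so they still cover the whole array with a smaller grid; since size is a power of two in this example, the odd-size tail branch in vecCopy is never exercised:
int device = 0, numSMs = 0, blocksPerSM = 0;
cudaGetDevice(&device);
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, device);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSM, vecCopy, block.x, 0);
dim3 gridOcc(numSMs * blocksPerSM);  // just enough blocks to fill the GPU
vecCopy<<<gridOcc, block, 0, stream>>>(d_in, d_out, size);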

Related

Compare Thrust fill with kernel launch speed [duplicate]

I want to add 128-bit vectors with carry. My 128-bit version (addKernel128 in the code below) is twice as slow as the basic 32-bit version (addKernel32 below).
Do I have memory coalescing problems? How can I get better performance?
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#define UADDO(c, a, b) asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
#define UADDC(c, a, b) asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
__global__ void addKernel32(unsigned int *c, const unsigned int *a, const unsigned int *b, const int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void addKernel128(unsigned *c, const unsigned *a, const unsigned *b, const int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size / 4)
{
uint4 a4 = ((const uint4 *)a)[tid],
b4 = ((const uint4 *)b)[tid],
c4;
UADDO(c4.x, a4.x, b4.x)
UADDC(c4.y, a4.y, b4.y) // add with carry
UADDC(c4.z, a4.z, b4.z) // add with carry
UADDC(c4.w, a4.w, b4.w) // add with carry (no overflow checking for clarity)
((uint4 *)c)[tid] = c4;
tid += blockDim.x * gridDim.x;
}
}
int main()
{
const int size = 10000000; // 10 million
unsigned int *d_a, *d_b, *d_c;
cudaMalloc((void**)&d_a, size * sizeof(int));
cudaMalloc((void**)&d_b, size * sizeof(int));
cudaMalloc((void**)&d_c, size * sizeof(int));
cudaMemset(d_a, 1, size * sizeof(int)); // dummy init just for the example
cudaMemset(d_b, 2, size * sizeof(int)); // dummy init just for the example
cudaMemset(d_c, 0, size * sizeof(int));
int nbThreads = 512;
int nbBlocks = 1024; // for example
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float m = 0;
cudaEventElapsedTime(&m, start, stop);
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
cudaDeviceReset();
printf("Elapsed = %g\n", m);
return 0;
}
Timing CUDA code on a WDDM GPU can be quite difficult for a variety of reasons. Most of these revolve around the fact that the GPU is being managed as a display device by Windows, and this can introduce a variety of artifacts into the timing. One example is that the windows driver and WDDM will batch work for the GPU, and may interleave display work in the middle of CUDA GPU work.
If possible, time your CUDA code on Linux, or else on a Windows GPU in TCC mode.
For performance, always build without the -G switch. In Visual Studio, this usually corresponds to building the release version of the project, not the debug version.
To get a good performance comparison, it's usually advisable to do some "warm-up runs" before actually measuring the timing results. These eliminate "start-up" and other one-time measurement issues, and you are more likely to get sensible results. You may also wish to run your code a number of times and average the results (a minimal sketch of this is shown after these notes).
It's also usually advisable to compile with an arch flag that corresponds to your GPU, so for example -arch=sm_20 for a cc2.0 GPU.
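For example, a warm-up-and-averaging sketch along those lines, reusing the kernel, buffers, and events already defined in the question's code (the warm-up and repetition counts here are arbitrary; this is an illustration, not a definitive harness):
// warm-up runs, not timed, to absorb one-time start-up costs
for (int i = 0; i < 3; ++i)
    addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);
cudaDeviceSynchronize();
// timed region: average over several repetitions
const int reps = 10;
cudaEventRecord(start);
for (int i = 0; i < reps; ++i)
    addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);
printf("Average elapsed = %g ms\n", ms / reps);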

CUDA device runtime api cudaMemsetAsync doesn't work

I am trying to call cudaMemsetAsync from a kernel (so-called "dynamic parallelism"). But no matter what value I use, it always sets the memory to 0.
Here is my test code:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_device_runtime_api.h"
#include <stdio.h>
const int size = 5;
__global__ void kernel(int *c)
{
cudaMemsetAsync(c, 0x7FFFFFFF, size * 4, NULL);
}
int main()
{
cudaError_t cudaStatus;
int c[size] = { 12, 12, 12, 12, 12 };
int *dev_c = 0;
cudaStatus = cudaSetDevice(0);
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaStatus = cudaMemcpy(dev_c, c, size * sizeof(int), cudaMemcpyHostToDevice);
kernel <<< 1, 1 >>>(dev_c);
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaStatus = cudaDeviceReset();
printf("%d\n", cudaStatus);
printf("{%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]);
return 0;
}
And when I run it, I get output like this:
>nvcc -run kernel.cu -gencode=arch=compute_35,code=\"sm_35,compute_35\" -rdc=true -lcudadevrt
kernel.cu
Creating library a.lib and object a.exp
0
{0,0,0,0,0}
When I call the memory set, I use the value 0x7FFFFFFF, so I'm expecting non-zero numbers, but it always shows zeros.
Is this a bug, or did I do something wrong? I'm using CUDA 8.0.
I can confirm this appears not to work in CUDA 8 on the systems I tested it with.
If you want a single thread to perform the operation, you can use memset directly in device code (it, like memcpy, has been supported forever). The compiler will emit a byte-wise loop inline within your kernel, and the operation will be handled by each running thread.
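For illustration, a minimal sketch of that single-thread approach, reusing the size constant and kernel from the question (note that, just like host memset, only a byte value is written, so a value like 0x7FFFFFFF cannot be applied this way):
__global__ void kernel(int *c)
{
    // one thread byte-fills size*4 bytes with 0x7F; memset writes bytes, not 32-bit words
    memset(c, 0x7F, size * 4);
}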
If you want a dynamic parallelism style memset operation, then the easiest thing is to make your own. A trivial (and very, very lightly tested) implementation in the code you posted might look like this:
#include <cstring>
#include <cstdio>
const int size = 5;
__global__ void myMemset_kernel(void* p, unsigned char val, size_t sz)
{
size_t tid = threadIdx.x + blockDim.x * blockIdx.x;
unsigned char* _p = (unsigned char*)p;
for(; tid < sz; tid += blockDim.x * gridDim.x) {
_p[tid] = val;
}
}
__device__ void myMemset(void* p, unsigned int val, size_t sz, cudaStream_t s=NULL)
{
const dim3 blocksz(256,1,1);
size_t nblocks = (sz + blocksz.x -1) / blocksz.x;
unsigned charval = val & 0xff;
myMemset_kernel<<< dim3(nblocks,1,1), blocksz, 0, s >>>(p, charval, sz);
}
__global__ void kernel(int *c)
{
cudaStream_t s;
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
myMemset(c, 0x7FFFFFFF, size * 4, s);
cudaDeviceSynchronize();
}
int main()
{
int c[size];
int *dev_c;
memset(&c[0], 0xffffff0c, size * sizeof(int));
printf("{%08x,%08x,%08x,%08x,%08x}\n", c[0], c[1], c[2], c[3], c[4]);
cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaMemcpy(dev_c, c, size * sizeof(int), cudaMemcpyHostToDevice);
kernel <<< 1, 1 >>>(dev_c);
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
printf("{%08x,%08x,%08x,%08x,%08x}\n", c[0], c[1], c[2], c[3], c[4]);
return 0;
}
which compiles and does this:
$ nvcc -rdc=true -arch=sm_52 -o memset memset.cu -lcudadevrt
$ ./memset
{0c0c0c0c,0c0c0c0c,0c0c0c0c,0c0c0c0c,0c0c0c0c}
{ffffffff,ffffffff,ffffffff,ffffffff,ffffffff}
A final point -- note the values above and read this question and answer. In your code, it is not possible to use cudaMemset to apply a value of 0x7FFFFFFF. Although the value argument is an unsigned integer, cudaMemset and its relatives work like regular memset and set byte values. Only the least significant byte of the 32 bit argument is used to set values. If your objective is to set 32 bit values, then you will need to make your own version of memset for that purpose anyway.
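For example, a minimal sketch of such a word-wide memset (the kernel name and launch configuration below are illustrative, not part of the code above):
__global__ void memset32_kernel(unsigned int *p, unsigned int val, size_t n)
{
    // grid-stride loop: each thread stores full 32-bit words rather than bytes
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += (size_t)blockDim.x * gridDim.x)
        p[i] = val;
}
// host-side launch for n 32-bit words, e.g.:
// memset32_kernel<<<(n + 255) / 256, 256>>>(d_words, 0x7FFFFFFFu, n);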

Result of reduction#1 is wrong

I implemented reduction #1 from the well-known slides by Mark Harris, but I obtain 0 as the result. I filled the input array with the same values shown in the slides. I compiled with CUDA 7.0 using the command nvcc reduction1.cu -o red1. Where is the mistake? Thanks.
#include <stdio.h>
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 16
__global__ void reduce1(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2)
{
if (tid % (2*s) == 0) sdata[tid] += sdata[tid + s];
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
int main()
{
int inputLength=16;
int hostInput[16]={10,1,8,-1,0,-2,3,5,-2,-3,2,7,0,11,0,2};
int hostOutput=0;
int *deviceInput;
int *deviceOutput;
cudaMalloc((void **)&deviceInput, inputLength * sizeof(int));
cudaMalloc((void **)&deviceOutput, sizeof(int));
cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(int),cudaMemcpyHostToDevice);
reduce1<<<1,THREADS_PER_BLOCK>>>(deviceInput, deviceOutput);
cudaDeviceSynchronize();
cudaMemcpy(&hostOutput, deviceOutput,sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n",hostOutput);
cudaFree(deviceInput);
cudaFree(deviceOutput);
return 0;
}
As talonmies said, you are using dynamic shared memory, but you are not allocating any memory space for it. You have to specify the size of this memory as the third argument of your kernel execution configuration.
reduce1<<<1, THREADS_PER_BLOCK, 64>>>(deviceInput, deviceOutput);
                                ^^
Another way to fix this code is to use static shared memory. Declare your shared memory like this:
__shared__ int sdata[16];
Please read this before asking questions for CUDA.
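Equivalently, the dynamic shared memory size can be expressed in terms of the block size, so it stays correct if THREADS_PER_BLOCK changes:
reduce1<<<1, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(int)>>>(deviceInput, deviceOutput);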

Using of shared memory not showing desired result

I am trying to learn the usage of shared memory with a view to increasing performance. Here I am trying to copy global memory to shared memory. But when I have a single block (256 threads) it gives the expected result, and with more than one block it gives random results.
#include <cuda.h>
#include <stdio.h>
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[400];
int t = blockIdx.x * blockDim.x + threadIdx.x;
d[t] = d[t]*d[t];
s[t] =d[t];
__syncthreads();
d[t] = s[t];
}
__global__ void dynamicReverse(int *d, int n)
{
extern __shared__ int s[];
int t = threadIdx.x;
s[t] = d[t]*d[t];
__syncthreads();
d[t] = s[t];
}
int main(void)
{
const int n = 400;
int a[n], d[n];
for (int i = 0; i < n; i++)
{
a[i] = i;
}
int *d_d;
cudaMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
int block_size = 256;
int n_blocks = n/block_size + (n%block_size == 0 ? 0:1);
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
staticReverse<<<n_blocks,block_size>>>(d_d, n);
cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
{
printf("%d\n",d[i]);
}
}
1) What does the third argument in the dynamicReverse<<<n_blocks,block_size,n*sizeof(int)>>>(d_d, n); kernel call do? Does it allocate shared memory for the entire block or per thread?
2) If I require more than 64 KB of shared memory per multiprocessor on compute capability 5.0, what do I need to do?
In your static shared memory allocation code you had three issues:
The size of the statically allocated shared memory should comply with the block size, not with the size of the input array,
You should use local thread index for indexing shared memory, instead of the global one;
You had no array out of bounds checking.
The dynamic shared memory allocation code had the same issues #2 and #3 as above, plus the fact that you were indexing global memory with the local thread index instead of the global one. You can use the third kernel launch argument to specify the size of the dynamic shared memory to be allocated. In particular, you should allocate 256 ints, i.e., an amount tied to the block size, just as in the static shared memory allocation case.
Here is the complete working code:
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/***********************************/
/* SHARED MEMORY STATIC ALLOCATION */
/***********************************/
#include <cuda.h>
#include <stdio.h>
__global__ void staticReverse(int *d, int n)
{
__shared__ int s[256];
int t = blockIdx.x * blockDim.x + threadIdx.x;
if (t < n) {
d[t] = d[t]*d[t];
s[threadIdx.x] =d[t];
__syncthreads();
d[t] = s[threadIdx.x];
}
}
/************************************/
/* SHARED MEMORY DYNAMIC ALLOCATION */
/************************************/
__global__ void dynamicReverse(int *d, int n)
{
extern __shared__ int s[];
int t = blockIdx.x * blockDim.x + threadIdx.x;
if (t < n) {
s[threadIdx.x] = d[t]*d[t];
__syncthreads();
d[t] = s[threadIdx.x];
}
}
int main(void)
{
const int n = 400;
int* a = (int*) malloc(n*sizeof(int));
int* d = (int*) malloc(n*sizeof(int));
for (int i = 0; i < n; i++) { a[i] = i; }
int *d_d; gpuErrchk(cudaMalloc(&d_d, n * sizeof(int)));
// run version with static shared memory
int block_size = 256;
int n_blocks = n/block_size + (n%block_size == 0 ? 0:1);
gpuErrchk(cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice));
//staticReverse<<<n_blocks,block_size>>>(d_d, n);
dynamicReverse<<<n_blocks,block_size,256*sizeof(int)>>>(d_d, n);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < n; i++) { printf("%d\n",d[i]); }
}

Finding minimum in GPU slower than CPU

I have implemented this code: http://www.cuvilib.com/Reduction.pdf in order to calculate the sum of the elements of a matrix.
However, on the GPU it runs much slower than on the CPU.
I have an i7 processor and an NVIDIA GT 540M graphics card.
Is it supposed to be that way, or is something else going on?
EDIT: I use version 3 of the above code on Ubuntu 13.04 and I compile it using Eclipse Nsight. The size of the matrix is 2097152 elements. It executes in 3.6 ms, whereas the CPU version takes around 1.0 ms. Below is the whole code:
#include <stdio.h>
#include <stdlib.h>
#include <thrust/sort.h>
#include <sys/time.h>
#include <omp.h>
#include <iostream>
#include <algorithm>
#define MIN(a,b) (((a)<(b))?(a):(b))
static const int WORK_SIZE = 2097152;
int find_min(int *a,int length){
int min = a[0];
for (int i=1;i<length;i++)
if (a[i]<min)
min=a[i];
return min;
}
__global__ static void red_min(int *g_idata,int *g_odata) {
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid]= g_idata[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s > 0; s >>= 1) {
if (tid<s) {
sdata[tid] = MIN(sdata[tid],sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
int main(void) {
int *d1,*d2;
int i,*result;
int *idata,*fdata;
srand ( time(NULL) );
result = (int *)malloc(sizeof(int));
idata = (int *)malloc(WORK_SIZE*sizeof(int));
fdata = (int *)malloc(WORK_SIZE*sizeof(int));
cudaMalloc((int**)&d1,WORK_SIZE*sizeof(int));
cudaMalloc((int**)&d2,WORK_SIZE*sizeof(int));
for (i = 0; i < WORK_SIZE; i++){
idata[i] = rand();
fdata[i] = i;
}
struct timeval begin, end;
gettimeofday(&begin, NULL);
*result = find_min(idata,WORK_SIZE);
printf( "Minimum Element CPU: %d \n", *result);
gettimeofday(&end, NULL);
int time = (end.tv_sec * (unsigned int)1e6 + end.tv_usec) - (begin.tv_sec * (unsigned int)1e6 + begin.tv_usec);
printf("Microseconds elapsed CPU: %d\n", time);
cudaMemcpy(d1,idata,WORK_SIZE*sizeof(int),cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate( &start);
cudaEventCreate( &stop);
cudaEventRecord(start,0);
int num_blocks = 16384;
bool flag = true;
while (num_blocks>0){
if (flag) {
red_min<<<num_blocks,128,128*sizeof(int)>>>(d1,d2);
}
else {
red_min<<<num_blocks,128,128*sizeof(int)>>>(d2,d1);
}
num_blocks /= 128;
flag = !flag;
}
GT540M is a mobile GPU, so I assume you're running on a laptop, and furthermore you may be hosting the X display on the 540M GPU.
I built a complete version of your code:
#include <stdio.h>
#include <stdlib.h>
#include <thrust/sort.h>
#include <sys/time.h>
#include <omp.h>
#include <iostream>
#include <algorithm>
#define MIN(a,b) (((a)<(b))?(a):(b))
static const int WORK_SIZE = 2097152;
int find_min(int *a,int length){
int min = a[0];
for (int i=1;i<length;i++)
if (a[i]<min)
min=a[i];
return min;
}
__global__ static void red_min(int *g_idata,int *g_odata) {
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid]= g_idata[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s > 0; s >>= 1) {
if (tid<s) {
sdata[tid] = MIN(sdata[tid],sdata[tid + s]);
}
__syncthreads();
}
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
int main(void) {
int *d1,*d2;
int i,*result;
int *idata,*fdata;
srand ( time(NULL) );
result = (int *)malloc(sizeof(int));
idata = (int *)malloc(WORK_SIZE*sizeof(int));
fdata = (int *)malloc(WORK_SIZE*sizeof(int));
cudaMalloc((int**)&d1,WORK_SIZE*sizeof(int));
cudaMalloc((int**)&d2,WORK_SIZE*sizeof(int));
for (i = 0; i < WORK_SIZE; i++){
idata[i] = rand();
fdata[i] = i;
}
struct timeval begin, end;
gettimeofday(&begin, NULL);
*result = find_min(idata,WORK_SIZE);
printf( "Minimum Element CPU: %d \n", *result);
gettimeofday(&end, NULL);
int time = (end.tv_sec * (unsigned int)1e6 + end.tv_usec) - (begin.tv_sec * (unsigned int)1e6 + begin.tv_usec);
printf("Microseconds elapsed CPU: %d\n", time);
cudaMemcpy(d1,idata,WORK_SIZE*sizeof(int),cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate( &start);
cudaEventCreate( &stop);
cudaEventRecord(start,0);
int num_blocks = 16384;
bool flag = true;
int loops = 0;
while (num_blocks>0){
if (flag) {
red_min<<<num_blocks,128,128*sizeof(int)>>>(d1,d2);
}
else {
red_min<<<num_blocks,128,128*sizeof(int)>>>(d2,d1);
}
num_blocks /= 128;
flag = !flag;
loops++;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float et = 0.0f;
cudaEventElapsedTime(&et, start, stop);
printf("GPU time: %fms, in %d loops\n", et, loops);
int gpuresult;
if (flag)
cudaMemcpy(&gpuresult, d1, sizeof(int), cudaMemcpyDeviceToHost);
else
cudaMemcpy(&gpuresult, d2, sizeof(int), cudaMemcpyDeviceToHost);
printf("GPU min: %d\n", gpuresult);
return 0;
}
compiled it:
$ nvcc -O3 -arch=sm_20 -o t264 t264.cu
and ran it on a M2050 GPU, RHEL 5.5, CUDA 5.5, Xeon X5650 CPU
$ ./t264
Minimum Element CPU: 288
Microseconds elapsed CPU: 1217
GPU time: 0.621408ms, in 3 loops
GPU min: 288
$
So my CPU results were pretty close to yours, but my GPU results were about 5-6x faster. If we compare the M2050 to your GT540M, we see that the M2050 has 14 SMs whereas the GT540M has 2. More importantly, the M2050 has about 5x the memory bandwidth of your GT540M GPU (28.8 GB/s peak theoretical for the GT540M vs. ~150 GB/s peak theoretical for the M2050).
Since a well written parallel reduction is a memory bandwidth constrained code on GPUs, the speed difference between your GPU and my GPU makes sense.
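As a rough, back-of-the-envelope check: the M2050's ~150 GB/s is roughly 5x the GT540M's 28.8 GB/s, and scaling my measured 0.62 ms by that ratio predicts on the order of 3 ms on the GT540M, which is in the same ballpark as the ~3.6 ms you reported.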
So I would say your results are probably about what is expected, and to get better results you will probably need a faster GPU.
Also, if your GT540M is hosting an X display, it's possible that the GPU timing is skewed by display activity. When timing a single kernel, this is not normally an issue - the kernel execution interrupts the display processing briefly. But when timing a sequence of kernels in succession, it's possible for the display tasks to jump in and execute in between kernel calls (the GPU multi-tasks when it is asked to both drive a display and process CUDA code). Therefore, this may be affecting performance in your case as well.