Accurate method to measure double-precision FMA and shared memory latency - CUDA

I am trying to come up with an accurate way to measure the latency of two operations:
1) Latency of a double precision FMA operation.
2) Latency of a double precision load from shared memory.
I am using a K20x and was wondering if this code would give accurate measurements.
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
using namespace std;
//Clock rate
#define MHZ 732e6
//number of streaming multiprocessors
#define SMS 14
// number of double precision units
#define DP_UNITS 16*4
//number of shared banks
#define SHARED_BANKS 32
#define ITER 100000
#define NEARONE 1.0000000000000004
__global__ void fma_latency_kernel(double *in, double *out){
int tid = blockIdx.x*blockDim.x+threadIdx.x;
double val = in[tid];
#pragma unroll 100
for(int i=0; i<ITER; i++){
val+=val*NEARONE;
}
out[tid]=val;
}
__global__ void shared_latency_kernel(double *in, double *out){
volatile extern __shared__ double smem[];
int tid = blockIdx.x*blockDim.x+threadIdx.x;
smem[threadIdx.x]=in[tid];
#pragma unroll 32
for(int i=0; i<ITER; i++){
smem[threadIdx.x]=smem[(threadIdx.x+i)%32]*NEARONE;
}
out[tid]=smem[threadIdx.x];
}
int main (int argc , char **argv){
float time;
cudaEvent_t start, stop, start2, stop2;
double *d_A, *d_B;
cudaMalloc(&d_A, DP_UNITS*SMS*sizeof(double));
cudaMalloc(&d_B, DP_UNITS*SMS*sizeof(double));
cudaError_t err;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
fma_latency_kernel<<<SMS, DP_UNITS>>>(d_A, d_B);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
time/=1000;
err = cudaGetLastError();
if(err!=cudaSuccess)
printf("Error FMA: %s\n", cudaGetErrorString(err));
printf("Latency of FMA = %3.1f clock cycles\n", (time/(double)ITER)*(double)MHZ);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cudaEventRecord(start2, 0);
shared_latency_kernel<<<1, SHARED_BANKS, SHARED_BANKS*sizeof(double)>>>(d_A, d_B);
cudaEventRecord(stop2, 0);
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&time, start2, stop2);
time/=1000;
err = cudaGetLastError();
if(err!=cudaSuccess)
printf("Error Shared Memory: %s\n", cudaGetErrorString(err));
printf("Latency of Shared Memory = %3.1f clock cycles\n", time/(double)ITER*(double)MHZ);
}
My results on the K20x are the following:
Latency of FMA = 16.4 clock cycles
Latency of Shared Memory = 60.7 clock cycles
This seems reasonable to me, but I am not sure how accurate it is.

Your latency values look very high to me - nearly double what I'd expect. To measure how many cycles something takes on the GPU, you can insert clock() calls before and after the relevant part of the kernel. The clock function returns the current cycle count as an int, so by subtracting the first value from the second you get the number of cycles that passed between dispatching the first clock instruction and dispatching the second.
Note that the numbers you get from this method will include extra time from the clock instructions themselves; I believe that by default a thread will block for several cycles immediately before and after every clock instruction, so you may want to experiment with that to see how many cycles it's adding so you can subtract them back out.
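For illustration, here is a minimal sketch of that technique applied to the FMA case; the kernel name, the DEPENDENT_OPS count and the single-warp launch are my own assumptions rather than anything from the question:

#define DEPENDENT_OPS 256
__global__ void fma_clock_kernel(const double *in, double *out, long long *cycles)
{
    double val = in[threadIdx.x];
    long long start = clock64();                  // clock64() avoids 32-bit counter wraparound
    #pragma unroll
    for (int i = 0; i < DEPENDENT_OPS; i++)
        val = fma(val, 1.0000000000000004, val);  // serially dependent FMAs, so latency is exposed
    long long stop = clock64();
    out[threadIdx.x] = val;                       // keep val live so the loop is not optimized away
    if (threadIdx.x == 0)
        cycles[0] = stop - start;                 // divide by DEPENDENT_OPS on the host
}

Launching this with a single warp (e.g. <<<1, 32>>>) and dividing the reported cycle count by DEPENDENT_OPS gives a per-FMA latency estimate, after subtracting the clock overhead discussed above.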

Related

Where is the boundary between the start and end of a CPU kernel launch and a GPU kernel launch in Nvidia profiling (NVPROF)?

What defines the start and end of a kernel launch on the CPU and on the GPU (the yellow blocks)? Where is the boundary between them?
Please notice that the start, end, and duration of those yellow blocks differ between the CPU and GPU rows. Why does the CPU invocation of vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n); take that long?
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
//printf("id = %d \n", id);
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 1000000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
CPU yellow block: (nvvp screenshot)
GPU yellow block: (nvvp screenshot)
Note that you mention NVPROF, but the pictures you are showing are from nvvp, the visual profiler; nvprof is the command-line profiler.
GPU Kernel launches are asynchronous. That means that the CPU thread launches the kernel but does not wait for the kernel to complete. In fact, the CPU activity is actually placing the kernel in a launch queue - the actual execution of the kernel may be delayed if anything else is happening on the GPU.
So there is no defined relationship between the CPU (API) activity, and the GPU activity with respect to time, except that the CPU kernel launch must obviously precede (at least slightly) the GPU kernel execution.
The CPU (API) yellow block represents the duration of time that the CPU thread spends in a library call into the CUDA Runtime library, to launch the kernel (i.e. place it in the launch queue). This library call activity usually has some time overhead associated with it, in the range of 5-50 microseconds. The start of this period is marked by the start of the call into the library. The end of this period is marked by the time at which the library returns control to your code (i.e. your next line of code after the kernel launch).
The GPU yellow block represents the actual time period during which the kernel was executing on the GPU. The start and end of this yellow block are marked by the start and end of kernel activity on the GPU. The duration here is a function of what the code in your kernel is doing, and how long it takes.
I don't think the exact reason why a GPU kernel launch takes ~5-50 microseconds of CPU time is documented or explained anywhere in an authoritative fashion, and it is a closed source library, so you will need to acknowledge that overhead as something you have little control over. If you design kernels that run for a long time and do a lot of work, this overhead can become insignificant.
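To see this asynchrony directly, one can time the launch call separately from the kernel's completion. The sketch below is my own illustration (not part of the original answer), using std::chrono and the vecAdd kernel from the question; the launch typically returns in tens of microseconds, while the synchronize call absorbs the actual kernel run time.

#include <chrono>
#include <cstdio>
// Assumes vecAdd, d_a, d_b, d_c, n, gridSize and blockSize are set up as in the question.
void time_launch_vs_completion(double *d_a, double *d_b, double *d_c,
                               int n, int gridSize, int blockSize)
{
    auto t0 = std::chrono::high_resolution_clock::now();
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);   // returns as soon as the kernel is queued
    auto t1 = std::chrono::high_resolution_clock::now();
    cudaDeviceSynchronize();                             // blocks until the kernel has actually finished
    auto t2 = std::chrono::high_resolution_clock::now();
    printf("launch call (CPU yellow block): %.1f us\n",
           std::chrono::duration<double, std::micro>(t1 - t0).count());
    printf("launch + kernel execution:      %.1f us\n",
           std::chrono::duration<double, std::micro>(t2 - t0).count());
}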

CUDA kernels are not overlapping

I have a simple vector multiplication kernel, which I am executing for 2 streams. But when I profile in NVVP, the kernels do not seem to overlap. Is it because each kernel execution utilizes 100% of the GPU? If not, what could be the cause?
Source code :
#include "common.h"
#include <cstdlib>
#include <stdio.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"
#include <string.h>
const int N = 1 << 20;
__global__ void kernel(int n, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = x[i] * y[i];
}
int main()
{
float *x, *y, *d_x, *d_y, *d_1, *d_2;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
cudaMalloc(&d_1, N*sizeof(float));
cudaMalloc(&d_2, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_1, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_2, y, N*sizeof(float), cudaMemcpyHostToDevice);
const int num_streams = 8;
cudaStream_t stream1;
cudaStream_t stream2;
cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&stream2, cudaStreamNonBlocking);
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
for (int i = 0; i < 300; i++) {
kernel<<<512, 512, 0, stream1>>>(N, d_x, d_y);
kernel<<<512, 512, 0, stream2>>>(N, d_1, d_2);
}
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
// cudaDeviceSynchronize();
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Elapsed time : %f ms\n", elapsedTime);
cudaDeviceReset();
cudaProfilerStop();
return 0;
}
EDIT: From the comments I understand that each kernel utilizes the GPU fully, so what is the best approach for a 262144-element vector multiplication across multiple streams?
My device information :
CUDA Device Query...
There are 1 CUDA devices.
CUDA Device #0
Major revision number: 5
Minor revision number: 0
Name: GeForce GTX 850M
Total global memory: 0
Total shared memory per block: 49152
Total registers per block: 65536
Warp size: 32
Maximum memory pitch: 2147483647
Maximum threads per block: 1024
Maximum dimension 0 of block: 1024
Maximum dimension 1 of block: 1024
Maximum dimension 2 of block: 64
Maximum dimension 0 of grid: 2147483647
Maximum dimension 1 of grid: 65535
Maximum dimension 2 of grid: 65535
Clock rate: 901500
Total constant memory: 65536
Texture alignment: 512
Concurrent copy and execution: Yes
Number of multiprocessors: 5
Kernel execution timeout: Yes
The reason your kernels don't overlap is that your GPU is 'filled' with execution threads, as @Robert Crovella mentions. Checking the Compute Capabilities chapter of the CUDA Programming Guide, there is a limit of 2048 threads per SM for your compute capability (5.0). You have 5 SMs, so at most 10240 threads can run simultaneously on your device. You are launching 512x512 = 262144 threads with a single kernel call, and that leaves practically no room at all for the other kernel call.
You need to launch small enough kernels so that 2 can run concurrently on your device.
I'm not an expert on streams, but from what I've understood, if you want to run your program using streams you need to split the work into chunks and compute a proper offset so that each stream can access its own data. In your current code, every stream you launch performs exactly the same calculation over exactly the same data; you have to split the data among the streams.
Beyond that, if you want maximum performance you need to overlap kernel execution with asynchronous data transfers. The easiest way to do this is to assign a scheme like the following to each of your streams, as presented here:
for (int i = 0; i < nStreams; ++i) {
int offset = i * streamSize;
cudaMemcpyAsync(&d_a[offset], &a[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]);
kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
cudaMemcpyAsync(&a[offset], &d_a[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i]);
}
This scheme simply tells each stream to copy its chunk of data in, execute the kernel on it, and then copy the results back. After the async calls are issued, the streams work simultaneously to complete their tasks.
PS: I would also recommend revising your kernel. Using one thread to compute just a single multiplication is overkill; I would have each thread process more data (see the sketch below).
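As an illustration only, here is a minimal sketch that combines both points; the chunk size, the stream count, the grid-stride kernel and the 16-block launch are my assumptions, not something taken from the original answer:

const int N = 1 << 20;
const int nStreams = 4;
const int streamSize = N / nStreams;                   // 262144 elements per stream (assumes N divides evenly)
const size_t streamBytes = streamSize * sizeof(float);
const int blockSize = 256;

__global__ void mulChunk(int n, const float *x, float *y)
{
    // Grid-stride loop: each thread handles several elements, so a small grid suffices
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        y[i] *= x[i];
}

void run_chunked(float *h_x, float *h_y, float *d_x, float *d_y)
{
    cudaStream_t stream[nStreams];
    for (int i = 0; i < nStreams; ++i)
        cudaStreamCreate(&stream[i]);
    for (int i = 0; i < nStreams; ++i) {
        int offset = i * streamSize;
        // Each stream owns its own chunk: copy in, compute, copy back.
        cudaMemcpyAsync(&d_x[offset], &h_x[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]);
        cudaMemcpyAsync(&d_y[offset], &h_y[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]);
        // 16 blocks x 256 threads = 4096 threads, small enough for two kernels to co-reside
        mulChunk<<<16, blockSize, 0, stream[i]>>>(streamSize, &d_x[offset], &d_y[offset]);
        cudaMemcpyAsync(&h_y[offset], &d_y[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i]);
    }
    for (int i = 0; i < nStreams; ++i) {
        cudaStreamSynchronize(stream[i]);
        cudaStreamDestroy(stream[i]);
    }
}

Note that cudaMemcpyAsync only overlaps with kernel execution when the host buffers are pinned (allocated with cudaMallocHost or cudaHostAlloc) rather than with plain malloc.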

How could we generate random numbers in CUDA C with different seed on each run?

I am working on a stochastic process and I want to generate a different series of random numbers in the CUDA kernel each time I run the program.
This is similar to what we do in C++ by declaring
seed = time(NULL)
followed by srand(seed)
and rand()
I can pass seeds from the host to the device via the kernel, but
the problem with doing this is that I would have to pass an entire array of seeds into the kernel for each thread to have a different random seed each time. Is there a way I could generate a random seed, process id, or machine time (or something like that) within the kernel and use it as a seed?
You don't need to pass an array of random seeds; when you use the cuRAND library, you can properly set the sequence number parameter of curand_init. For example (disclaimer: this is an untested function):
__global__ void generate_random_numbers(float* numbers, unsigned long seed, int Np) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < Np) {
curandState state;
curand_init(seed, i, 0, &state);
numbers[i] = curand_uniform(&state);
}
}
You can also avoid passing the seed from outside if you change the curand_init instruction to
curand_init(clock64(), i, 0, &state);
EDIT
Following Roger Dahl's comment, I have done a comparison (Kepler K20c) between four different possibilities for the generation of arrays of 131072 elements:
Single random number generation: separate kernels for initialization and random number generation;
Single random number generation: unique kernel for initialization and random number generation;
Multiple random number generation: separate kernels for initialization and random number generation;
Multiple random number generation: unique kernel for initialization and random number generation;
Below is the code. The timings for the generation were the following:
861ms;
852ms;
866ms;
2556ms;
I hope I have correctly understood the performance issue raised by Roger Dahl.
#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#define DSIZE 8192*16
#define nTPB 256
/***********************/
/* CUDA ERROR CHECKING */
/***********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*************************/
/* CURAND INITIALIZATION */
/*************************/
__global__ void initCurand(curandState *state, unsigned long seed){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, idx, 0, &state[idx]);
}
__global__ void testrand1(curandState *state, float *a){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
a[idx] = curand_uniform(&state[idx]);
}
__global__ void testrand2(unsigned long seed, float *a){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
curandState state;
curand_init(seed, idx, 0, &state);
a[idx] = curand_uniform(&state);
}
/********/
/* MAIN */
/********/
int main() {
int n_iter = 20;
curandState *devState; gpuErrchk(cudaMalloc((void**)&devState, DSIZE*sizeof(curandState)));
float *d_a; gpuErrchk(cudaMalloc((void**)&d_a, DSIZE*sizeof(float)));
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i=0; i<n_iter; i++) {
initCurand<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(devState, 1);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
testrand1<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(devState, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time for separate kernels: %3.1f ms \n", time);
cudaEventRecord(start, 0);
for (int i=0; i<n_iter; i++) {
testrand2<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(1, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time for single kernels: %3.1f ms \n", time);
cudaEventRecord(start, 0);
for (int i=0; i<n_iter; i++) {
initCurand<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(devState, 1);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
testrand1<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(devState, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
testrand1<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(devState, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
testrand1<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(devState, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time for separate kernels with multiple random number generation: %3.1f ms \n", time);
cudaEventRecord(start, 0);
for (int i=0; i<n_iter; i++) {
testrand2<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(1, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
testrand2<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(1, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
testrand2<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(1, d_a);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time for single kernels for multiple random number generation: %3.1f ms \n", time);
getchar();
}
Output on GTX660:
Elapsed time for separate kernels: 1960.3 ms
Elapsed time for single kernels: 1536.9 ms
Elapsed time for separate kernels with multiple random number generation: 1576.0 ms
Elapsed time for single kernels for multiple random number generation: 4612.2 ms
Output on GTX570:
Elapsed time for separate kernels: 957.2 ms
Elapsed time for single kernels: 947.7 ms
Elapsed time for separate kernels with multiple random number generation: 964.6 ms
Elapsed time for single kernels for multiple random number generation: 2839.0 ms
Approximately the same performance as on the K20c.
Using a different seed on each run should be straightforward. The exact method will depend on which generator you are using, but if you're using one of the cuRAND generators then you can cast your time_t from time(NULL) to a 64-bit integer and pass it in to the seed functions.
If you're calling the generator from your kernel you will need to pass this seed in either as a kernel argument or via a __device__ variable. You can then use an offset to curand_init() or use skip_ahead() to get distinct sub-sequences.
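A minimal sketch of the kernel-argument route, re-using the generate_random_numbers kernel shown earlier in this thread (d_numbers and Np are assumed to have been set up by the caller):

#include <time.h>
// Derive a fresh seed from the wall clock on every run and pass it as a kernel argument.
unsigned long seed = (unsigned long) time(NULL);
generate_random_numbers<<<(Np + 255) / 256, 256>>>(d_numbers, seed, Np);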
If you have a specific generator for which this will not work, please post more information.
You can create more than one __global__ function for random number initialization and generation, or create a loop that calls the __global__ function repeatedly.
example:
gpuErrchk(cudaMalloc((void**)&gpu_no, N * sizeof(double))); // allocate memory for the random numbers on the device once, outside the loop
for (int rns = 0; rns < 5; rns++) { // to seed 'loop' times
init<<<N, 10>>>(devState, time(0)); // re-seed the generator states
rndn<<<N, 10>>>(devState, gpu_no); // invoke the kernel that generates the random numbers
gpuErrchk(cudaMemcpy(cpu_no, gpu_no, N * sizeof(double), cudaMemcpyDeviceToHost));
}
cout << "the transition matrix " << ++generate << " seed generation is done" << endl;
This does not have any noticeable effect on the random numbers generated, but there is a risk of the sequences being correlated and also of poor convergence in the long run. Why would you want to seed more than once per iteration anyway?
You can use the library functions to generate different random-number distributions, such as curand_uniform, curand_normal, curand_poisson and so on.
I don't know if this answers your question.

Why is Thrust so slow at finding the max and min of an array?

Here is my kernel call code using Thrust:
inline void find_min_max(thrust::device_vector<Npp8u> dev_vec, Npp8u *min, Npp8u *max){
thrust::pair<thrust::device_vector<Npp8u>::iterator,thrust::device_vector<Npp8u>::iterator> tuple;
tuple = thrust::minmax_element(dev_vec.begin(),dev_vec.end());
*min = *(tuple.first);
*max = *tuple.second;
}
I also implemented the same algorithm with a raw CUDA kernel using the map-reduce paradigm, and as simple CPU code. From my measurements I see that Thrust is significantly the slowest.
I used CUDA events to time both the raw CUDA and the Thrust code. If events are valid for benchmarking Thrust, I am pretty sure that I am measuring the execution time correctly.
Here is the measurement segment:
....
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
thrust::device_vector<Npp8u> image_dev(imageHost, imageHost+N);
// Device vector allocation
find_min_max(image_dev,&min,&max);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime1;
cudaEventElapsedTime(&elapsedTime1, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
totalTime1 = elapsedTime1/1000;
....
My real question: is there a better approach than the simple minmax_element function in Thrust?
My machine specs: an Asus K55V laptop with a GeForce 635M and an i7 processor.
And here are all the codes for the Thrust and CPU versions.
You haven't shown any of the code that you're comparing to thrust, you haven't given any machine specifications (GPU, CPU, etc.), and you also haven't told us what the actual measured times were.
Nevertheless, I took your code and created a test case out of it, comparing thrust vs. STL (since you haven't shown your CPU code or any other implementation):
#include <stdio.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include <algorithm>
#include <time.h>
#define N 1000000
#define LOOPS 1000
inline void find_min_max(thrust::device_vector<int> &dev_vec, int *min, int *max){
thrust::pair<thrust::device_vector<int>::iterator,thrust::device_vector<int>::iterator> tuple;
tuple = thrust::minmax_element(dev_vec.begin(),dev_vec.end());
*min = *(tuple.first);
*max = *tuple.second;
}
int main(){
int minele, maxele;
std::vector<int> a;
for (int i=0; i<N; i++)
a.push_back(rand());
thrust::host_vector<int> h_a(N);
thrust::copy(a.begin(), a.end(), h_a.begin());
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i=0; i < LOOPS; i++){
thrust::device_vector<int> d_a = h_a;
find_min_max(d_a,&minele,&maxele);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime1, totalTime1;
cudaEventElapsedTime(&elapsedTime1, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
totalTime1 = elapsedTime1/(1000*LOOPS);
printf("thrust min element = %d, max element = %d\n", minele, maxele);
printf("thrust time = %f\n", totalTime1);
clock_t t;
t = clock();
std::vector<int>::iterator resultmax, resultmin;
for (int i = 0; i<LOOPS; i++){
resultmax = std::max_element(a.begin(), a.end());
resultmin = std::min_element(a.begin(), a.end());
}
t = clock() - t;
printf("STL min element = %d, max element = %d\n", *resultmin, *resultmax);
printf("STL time = %f\n", ((float)t)/(CLOCKS_PER_SEC*LOOPS));
return 0;
}
I compiled this code using CUDA 5.0, RHEL 5.5, a Xeon X5560 2.8GHz CPU, and a Quadro 5000 GPU, which is a cc 2.0 device that is somewhat slower than an M2050 (11 SMs vs. 14). These are the results:
thrust min element = 1210, max element = 2147480021
thrust time = 0.001741
STL min element = 1210, max element = 2147480021
STL time = 0.004520
Even if we make allowance for the fact that I am using two STL function calls to get the min and max (the C++11 standard includes a single minmax_element call), and cut the STL time in half, thrust is faster.
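For reference, a minimal sketch of the single-call C++11 variant mentioned above, assuming the same std::vector<int> a as in the test code:

#include <algorithm>
// One pass over the data returns iterators to both the min and the max element.
auto mm = std::minmax_element(a.begin(), a.end());
int stl_min = *mm.first;
int stl_max = *mm.second;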
If you want to discuss why your case may be special, please include a complete, compilable, simple comparison code, similar to what I have provided, along with your machine specifications, and the actual timing results.
As a minor optimization comment, if you pass the device_vector to your find_min_max function by reference (&) rather than by value, it will run a bit quicker.
In my case, if I take the host-> device_vector copy out of the timing loop, my thrust time drops from 0.001741 seconds to 0.000387 seconds, indicating that the host-> device copy is about 78% of the total thrust time.
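Concretely, that amounts to hoisting the host-to-device copy out of the timed loop in the test code above (a sketch, not a separately verified benchmark):

thrust::device_vector<int> d_a = h_a;       // host -> device copy done once, outside the timing
cudaEventRecord(start, 0);
for (int i = 0; i < LOOPS; i++){
    find_min_max(d_a, &minele, &maxele);    // only the minmax computation is timed now
}
cudaEventRecord(stop, 0);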
EDIT: Now that you've posted your code (although you don't mention the timings you get) I ran it with a 512x512 lena grayscale image, and got the following results on my setup:
$ ./cpu
Version: P5
Comment: # Created by Imlib
Width: 512 Height: 512
Max value: 255
ELAPSED TIME -AVG finding max and min: 0.0014437
ELAPSED TIME -AVG finding max and min: 0.0038715
$ ./thr
Load PGM file.
Version: P5
Comment: # Created by Imlib
Width: 512 Height: 512
Max value: 255
ELAPSED TIME -AVG for kernel 1: 0.000658944
ELAPSED TIME -AVG for kernel 2: 0.000179552
$
So it seems to me that even for your code, thrust is faster on my setup.

CUDA kernel is slower than CPU

I'm new to CUDA and I'm probably doing something wrong.
All I need is a logical operation on two binary vectors, each 2048000 elements long. I compared the speed of a logical AND in a Matlab C MEX file and in a CUDA kernel; the C code on the CPU is ~5% faster than CUDA. Please note that I measured only the kernel execution (without memory transfers). I have an i7 930 and a 9800GT.
MEX file testCPU.c:
#include "mex.h"
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] ) {
int i, varLen;
unsigned char *vars, *output;
vars = mxGetPr(prhs[0]);
plhs[0] = mxCreateLogicalMatrix(2048000, 1);
output = mxGetPr(plhs[0]);
for (i=0;i<2048000;i++){
output[i] = vars[i] & vars[2048000+i];
}
}
Compile
mex testCPU.c
Create vectors
vars = ~~(randi(2,2048000,2)-1);
Measure speed:
tic;testCPU(vars);toc;
CUDA:
CUDA file testGPU.cu:
#include "mex.h"
#include "cuda.h"
__global__ void logical_and(unsigned char* in, unsigned char* out, int N) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
out[idx] = in[idx] && in[idx+N];
}
void mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] ) {
int i;
unsigned char *vars, *output, *gpu, *gpures;
vars = (unsigned char*)mxGetData(prhs[0]);
plhs[0] = mxCreateLogicalMatrix(2048000, 1);
output = (unsigned char*)mxGetData(plhs[0]);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float dt_ms;
// input GPU malloc
cudaEventRecord(start, 0);
cudaMalloc( (void **) &gpu, sizeof(unsigned char)*4096000);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("GPU input malloc: %f ms, %i\n", dt_ms, cudaGetLastError());
// output GPU malloc
cudaEventRecord(start, 0);
cudaMalloc( (void **) &gpures, sizeof(unsigned char)*2048000);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("GPU output malloc: %f ms, %i\n", dt_ms, cudaGetLastError());
// copy from CPU to GPU
cudaEventRecord(start, 0);
cudaMemcpy( gpu, vars, sizeof(unsigned char)*4096000, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("copy input from CPU to GPU: %f ms, %i\n", dt_ms, cudaGetLastError());
dim3 dimBlock(32);
printf("thread count: %i\n", dimBlock.x);
dim3 dimGrid(2048000/dimBlock.x);
printf("block count: %i\n", dimGrid.x);
// --- KERNEL ---
cudaEventRecord(start, 0);
logical_and<<<dimGrid, dimBlock>>>(gpu, gpures, 2048000);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("GPU kernel: %f ms, %i\n", dt_ms, cudaGetLastError());
// result from GPU to CPU
cudaEventRecord(start, 0);
cudaMemcpy( output, gpures, sizeof(unsigned char)*2048000, cudaMemcpyDeviceToHost );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&dt_ms, start, stop);
printf("copy output from GPU to CPU: %f ms, %i\n", dt_ms, cudaGetLastError());
cudaFree(gpu);
cudaFree(gpures);
}
Compile:
nvmex -f nvmexopts_9.bat testGPU.cu
-I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v4.2\include"
-L"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v4.2\lib\x64" -lcudart -lcufft
Output:
GPU input malloc: 0.772160 ms, 0
GPU output malloc: 0.041728 ms, 0
copy input from CPU to GPU: 1.494784 ms, 0
thread count: 32
block count: 64000
*** GPU kernel: 3.761216 ms, 0 ***
copy output from GPU to CPU: 1.203488 ms, 0
Is that code OK? The CPU was ~0.1 ms faster than the CUDA kernel. I tried different thread counts (multiples of 32) up to 512; 32 was the fastest. The & operator instead of && was almost 1 ms slower.
Is the 9800GT really so weak? What speed-up can I expect with a current mainstream card (e.g. GTX 460 or 560)?
Thank you
EDIT: based on talonmies' comment, I made these modifications:
Kernel function:
__global__ void logical_and(uchar4* in, uchar4* out, int N) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
out[idx].x = in[idx].x & in[idx+N].x;
out[idx].y = in[idx].y & in[idx+N].y;
out[idx].z = in[idx].z & in[idx+N].z;
out[idx].w = in[idx].w & in[idx+N].w;
}
Main function:
uchar4 *gpu, *gpures;
// 32 was worst, 64,128,256,512 were similar
dim3 dimBlock(128);
// block count is now 4xtimes smaller
dim3 dimGrid(512000/dimBlock.x);
Output:
GPU input malloc: 0.043360 ms, 0
GPU output malloc: 0.038592 ms, 0
copy input from CPU to GPU: 1.499584 ms, 0
thread count: 128
block count: 4000
*** GPU kernel: 0.131296 ms, 0 ***
copy output from GPU to CPU: 1.281120 ms, 0
Is that correct? Almost a 30x speed-up! It seems too good to be true, but the result is correct :)
How much faster would a GTX 560 be on this particular task? Thanks.
Edit 2:
Is this code
__global__ void logical_and(uchar4* in, uchar4* out, int N) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
out[idx].x = in[idx].x & in[idx+N].x;
out[idx].y = in[idx].y & in[idx+N].y;
out[idx].z = in[idx].z & in[idx+N].z;
out[idx].w = in[idx].w & in[idx+N].w;
}
automatically transformed to:
__global__ void logical_and(uchar4* in, uchar4* out, int N) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
uchar4 buff;
buff.x = in[idx].x;
buff.y = in[idx].y;
buff.z = in[idx].z;
buff.w = in[idx].w;
buff.x &= in[idx+N].x;
buff.y &= in[idx+N].y;
buff.z &= in[idx+N].z;
buff.w &= in[idx+N].w;
out[idx].x = buff.x;
out[idx].y = buff.y;
out[idx].z = buff.z;
out[idx].w = buff.w;
}
by the compiler?
If it is correct, it explains my confusion about coalesced access. I thought that in[idx] & in[idx+N] leads to non-coalesced access because of accessing non-contiguous memory. But in fact, in[idx] and in[idx+N] are loaded in two coalesced steps. N can be any multiple of 16, because uchar4 is 4 bytes long, and for coalesced access the address must be aligned to 64 bytes (on a 1.1 device). Am I right?
As talonmies pointed out, you're accessing and processing your data byte-wise, which is far from optimal. A collection of techniques you may want to consider, such as instruction-level parallelism and buffered reads/writes, is summarized in the nVidia webinar Better Performance at Lower Occupancy by Vasily Volkov.
In a nutshell, what you want to do is, in each thread, read several uint4 in a coalesced way, process them, and only then store them.
Update
Does it make any difference if you re-write your code as follows?
__global__ void logical_and(unsigned int* in, unsigned int* out, int N) {
int idx = blockIdx.x*blockDim.x*chunksize+threadIdx.x;
unsigned int buff[chunksize];
#pragma unroll
for ( int k = 0 ; k < chunksize ; k++ )
buff[k] = in[ blockDim.x*k + idx ];
#pragma unroll
for ( int k = 0 ; k < chunksize ; k++ )
buff[k] &= in[ blockDim.x*k + idx + N ];
#pragma unroll
for ( int k = 0 ; k < chunksize ; k++ )
out[ blockDim.x*k + idx ] = buff[k];
}
Note that I've assumed chunksize is a variable you've #defined somewhere, e.g.
#define chunksize 4
Note also that you have to divide the number of blocks you launch, and N, by that number. I've also used unsigned int, which is just four packed uchars. In your calling function, you may have to cast your pointers accordingly.
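For instance, a sketch of what the adjusted launch could look like (the cast and the grid arithmetic are my illustration, based on the sizes used in the question):

// 2048000 bytes per input vector; each unsigned int packs 4 of them,
// and each thread processes chunksize ints.
dim3 dimBlock(128);
dim3 dimGrid(2048000 / 4 / chunksize / dimBlock.x);
logical_and<<<dimGrid, dimBlock>>>((unsigned int*)gpu, (unsigned int*)gpures, 2048000 / 4);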
What I think is happening is called false sharing. I think the problem is that the byte-sized regions you are trying to write from your threads produce massive contention, because different threads are trying to write to the same word-aligned address. I'm not sure of the details on the GPU, but on the CPU, when different threads try to write to memory within the same aligned region (a cache line), they continuously block each other, and your overall performance plummets.