GTX 1080 Floating Point Performance Calculation [duplicate] - cuda

I'm trying to reach the peak performance of each SM with the code below. The per-SM peak lies somewhere around 25 GFlops (GTX 275, GT200 architecture). This code gives at most 8 GFlops.
__global__ void new_ker(float *x)
{
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    float a, b;
    a = 0;
    b = x[index];
    // LOOP = 10000000
    // No. of blocks = 1
    // Threads per block = 512 (I'm using GTX 275 - GT200 Arch.)
    #pragma unroll 2048
    for (int i = 0; i < LOOP; i++){
        a = a*b + b;
    }
    x[index] = a;
}
I don't want to increase ILP in the code. Any ideas why it's not reaching peak?
int main(int argc, char **argv)
{
    // Initializations
    float *x;
    float *dx;
    cudaEvent_t new_start, new_stop;
    float elapsed;
    double gflops;
    x = 0;
    cudaMalloc((void **)&dx, sizeof(float)*THPB);
    // ILP = 1
    cudaEventCreate(&new_start);
    cudaEventCreate(&new_stop);
    printf("Kernel1:\n");
    cudaEventRecord(new_start, 0);
    new_ker<<<BLOCKS,THPB>>>(dx);
    cudaEventRecord(new_stop, 0);
    cudaEventSynchronize(new_stop);
    cudaEventElapsedTime(&elapsed, new_start, new_stop);
    x = (float *)malloc(sizeof(float)*THPB);
    cudaMemcpy(x, dx, sizeof(float)*THPB, cudaMemcpyDeviceToHost);
    gflops = ((double)(BLOCKS)*(THPB)*LOOP/elapsed)/1000000;
    printf("\t%f", gflops);
    cudaEventDestroy(new_start);
    cudaEventDestroy(new_stop);
    return 0;
}
Platform:
CUDA 3.0
NVIDIA GeForce GTX275 (GT200)

If I put together a complete repro case from your code, using the correct FLOP calculation:
#include <stdio.h>

#define LOOP (10000000)
#define BLOCKS (30)
#define THPB (512)

__global__ void new_ker(float *x)
{
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    float a, b;
    a = 0;
    b = x[index];
    #pragma unroll 2048
    for (int i = 0; i < LOOP; i++){
        a = a*b + b;
    }
    x[index] = a;
}

int main(int argc, char **argv)
{
    // Initializations
    float *x;
    float *dx;
    cudaEvent_t new_start, new_stop;
    float elapsed;
    double gflops;
    x = 0;
    cudaMalloc((void **)&dx, sizeof(float)*THPB*BLOCKS); // sized for all BLOCKS*THPB threads
    // ILP = 1
    cudaEventCreate(&new_start);
    cudaEventCreate(&new_stop);
    printf("Kernel1:\n");
    cudaEventRecord(new_start, 0);
    new_ker<<<BLOCKS,THPB>>>(dx);
    cudaEventRecord(new_stop, 0);
    cudaEventSynchronize(new_stop);
    cudaEventElapsedTime(&elapsed, new_start, new_stop);
    x = (float *)malloc(sizeof(float)*THPB*BLOCKS);
    cudaMemcpy(x, dx, sizeof(float)*THPB*BLOCKS, cudaMemcpyDeviceToHost);
    gflops = 2.0e-6 * ((double)(LOOP)*double(THPB*BLOCKS)/(double)elapsed);
    printf("\t%f\n", gflops);
    cudaEventDestroy(new_start);
    cudaEventDestroy(new_stop);
    return 0;
}
And I compile and run it on a 1.4GHz GTX 275 with CUDA 3.2 on a 64-bit Linux platform:
$ nvcc -arch=sm_13 -Xptxas="-v" -o perf perf.cu
ptxas info : Compiling entry function '_Z7new_kerPf' for 'sm_13'
ptxas info : Used 4 registers, 8+16 bytes smem, 8 bytes cmem[1]
$ ./perf
Kernel1:
671.806039
I get within 0.01% of peak FLOP/s for that card running a pure FMAD code (1.4 GHz * 2 FLOP * 8 cores/MP * 30 MP = 672 GFLOP/s).
So it seems that the code does, in fact, hit peak FLOP/s with one block per multiprocessor, but you just are not calculating the FLOP/s number correctly.
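To spell out the corrected arithmetic: the loop body a = a*b + b compiles to a single FMAD instruction, which counts as 2 FLOPs, and cudaEventElapsedTime reports milliseconds, so

GFLOP/s = 2 * LOOP * THPB * BLOCKS / (elapsed * 1e6)

The formula in the question omits the factor of 2 for the multiply-add and so underreports the throughput by half.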

Related

CUDA kernels are not overlapping

I have a simple vector multiplication kernel, which I am executing in 2 streams. But when I profile in NVVP, the kernels do not seem to overlap. Is it because each kernel execution utilizes 100% of the GPU? If not, what could be the cause?
Source code:
#include "common.h"
#include <cstdlib>
#include <stdio.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"
#include <string.h>
const int N = 1 << 20;
__global__ void kernel(int n, float *x, float *y)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) y[i] = x[i] * y[i];
}
int main()
{
    float *x, *y, *d_x, *d_y, *d_1, *d_2;
    x = (float*)malloc(N*sizeof(float));
    y = (float*)malloc(N*sizeof(float));

    cudaMalloc(&d_x, N*sizeof(float));
    cudaMalloc(&d_y, N*sizeof(float));
    cudaMalloc(&d_1, N*sizeof(float));
    cudaMalloc(&d_2, N*sizeof(float));

    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_1, x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_2, y, N*sizeof(float), cudaMemcpyHostToDevice);

    const int num_streams = 8;
    cudaStream_t stream1;
    cudaStream_t stream2;
    cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
    cudaStreamCreateWithFlags(&stream2, cudaStreamNonBlocking);

    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventRecord(start, 0);

    for (int i = 0; i < 300; i++) {
        kernel<<<512, 512, 0, stream1>>>(N, d_x, d_y);
        kernel<<<512, 512, 0, stream2>>>(N, d_1, d_2);
    }

    cudaStreamSynchronize(stream1);
    cudaStreamSynchronize(stream2);
    // cudaDeviceSynchronize();

    cudaEventCreate(&stop);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Elapsed time : %f ms\n", elapsedTime);

    cudaDeviceReset();
    cudaProfilerStop();
    return 0;
}
EDIT: From the comments I understand that each kernel call utilizes the GPU fully, so what is the best approach for achieving 262144-element vector multiplication with multiple streams?
My device information :
CUDA Device Query...
There are 1 CUDA devices.
CUDA Device #0
Major revision number: 5
Minor revision number: 0
Name: GeForce GTX 850M
Total global memory: 0
Total shared memory per block: 49152
Total registers per block: 65536
Warp size: 32
Maximum memory pitch: 2147483647
Maximum threads per block: 1024
Maximum dimension 0 of block: 1024
Maximum dimension 1 of block: 1024
Maximum dimension 2 of block: 64
Maximum dimension 0 of grid: 2147483647
Maximum dimension 1 of grid: 65535
Maximum dimension 2 of grid: 65535
Clock rate: 901500
Total constant memory: 65536
Texture alignment: 512
Concurrent copy and execution: Yes
Number of multiprocessors: 5
Kernel execution timeout: Yes
The reason your kernels don't overlap is that your GPU is 'filled' with execution threads, as @Robert Crovella mentions. Checking the Compute Capabilities chapter of the CUDA Programming Guide, there is a limit of 2048 resident threads per SM for your compute capability (5.0). You have 5 SMs, so at most 10240 threads can be resident on your device at once. You are launching 512x512 = 262144 threads with a single kernel call, and that leaves practically no room for the other kernel call.
You need to launch kernels small enough that two of them can run concurrently on your device.
I'm not an expert on streams, but from what I've understood, if you want to run your program using streams, you need to split the work into chunks and compute a proper offset for each stream to access its own data. In your current code, each stream performs exactly the same calculation over exactly the same data; you have to split the data among the streams.
Other than that, if you want to get maximum performance you need to overlap kernel execution with asynchronous data transfers. The easiest way to do this is to assign a scheme like the following to each of your streams, as presented here:
for (int i = 0; i < nStreams; ++i) {
    int offset = i * streamSize;
    cudaMemcpyAsync(&d_a[offset], &a[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]);
    kernel<<<streamSize/blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
    cudaMemcpyAsync(&a[offset], &d_a[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i]);
}
This configuration simply tells each stream to do a memcpy, then execute the kernel on some data, then copy the data back. After the async calls, the streams work concurrently and complete their tasks.
PS: I would also recommend revising your kernel. Using one thread to compute just a single multiplication is overkill; I would have each thread process more data, for example with a grid-stride loop, as sketched below.
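A minimal sketch of that idea, keeping the kernel signature from the question (untested on your GPU, so treat it as a starting point, not a definitive implementation):

__global__ void kernel(int n, float *x, float *y)
{
    // Grid-stride loop: each thread covers several elements, so the kernel
    // can be launched with a small grid that does not fill the device,
    // leaving SMs free for a kernel running in another stream.
    for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < n; i += blockDim.x*gridDim.x)
        y[i] = x[i] * y[i];
}

Launched as, say, kernel<<<5, 256, 0, stream1>>>(N, d_x, d_y), it still touches all N elements but occupies only a small fraction of the device.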

cudaEventElapsedTime not expected behaviour

I'm trying to compute the total time taken on the GPU to compute something. I'm using cudaEventRecord and cudaEventElapsedTime to determine this, but I'm seeing unexpected behaviour, or at least, behaviour unexpected to me :) I wrote the example below to understand what's happening, and I'm still confused.
In the example below I was expecting the same time to be reported for all three iterations, but the result is:
2.80342
1003
2005.6
Which means that the total time is taking the CPU sleep time into account.
Am I doing something wrong? If not, is it possible to do what I want?
#include <iostream>
#include <thread>
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
__global__ void kernel_test(int *a, int N) {
    for (int i = threadIdx.x; i < N; i += N) {
        if (i < N)
            a[i] = 1;
    }
}

int main(int argc, char **argv) {
    cudaEvent_t start[3], stop[3];
    for (int i = 0; i < 3; i++) {
        cudaEventCreate(&start[i]);
        cudaEventCreate(&stop[i]);
    }

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    const int N = 1024 * 1024;
    int *h_a = (int*)malloc(N * sizeof(int));
    int *a = 0;
    cudaMalloc((void**)&a, N * sizeof(int));

    for (int i = 0; i < 3; i++) {
        cudaEventRecord(start[i], stream);
        cudaMemcpyAsync(a, h_a, N * sizeof(int), cudaMemcpyHostToDevice, stream);
        kernel_test<<<1, 1024, 0, stream>>>(a, N);
        cudaMemcpyAsync(h_a, a, N * sizeof(int), cudaMemcpyDeviceToHost, stream);
        cudaEventRecord(stop[i], stream);

        std::this_thread::sleep_for(std::chrono::seconds(i));

        cudaEventSynchronize(stop[i]);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start[i], stop[i]);
        std::cout << milliseconds << std::endl;
    }
    return 0;
}
I attach the nsight result to verify the behaviour of my example.
Windows 8.1
Geforce GTX 780 Ti
Nvidia drivers: 358.50
EDIT:
Added code to be complete
Attached Nsight result
Added OS and driver info
If you're running the program on Windows using the WDDM driver (in contrast to TCC with Tesla cards, or to Linux), this may be the issue:
With WDDM, kernels are not executed immediately upon invocation but are instead enqueued to a command buffer. Once the buffer is full, it gets flushed and the enqueued commands are actually executed. Another way to force the command buffer to be explicitly flushed is to synchronize.
Now what happens is that you wait before the command buffer is actually flushed...
Edit
Also see https://devtalk.nvidia.com/default/topic/548639/is-wddm-causing-this-/ for the problem and how cudaEventQuery(0) may help
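A minimal sketch of that workaround inside the timing loop, assuming WDDM batching really is the cause (the linked thread suggests cudaEventQuery; querying the freshly recorded event is a cheap, non-blocking call that nudges the driver into flushing):

cudaEventRecord(stop[i], stream);
cudaEventQuery(stop[i]);  // non-blocking; encourages the WDDM driver to flush
                          // its command buffer before the host thread sleeps
std::this_thread::sleep_for(std::chrono::seconds(i));
cudaEventSynchronize(stop[i]);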

cudaMemcpy doesn't work in 64 bits

I made a very simple CUDA kernel which populates an array of 100 elements with f[i] = i (and checked, using an assert in another kernel, that it had indeed done so).
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>

// definition of gpuErrchk (filled in here with the standard CUDA error-checking macro)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

__global__ void setToItself(int* vect){
    vect[threadIdx.x] = threadIdx.x;
}

int main(){
    int* a_d;
    gpuErrchk( cudaMalloc(&a_d, 100 * sizeof(int)) );
    setToItself<<<1,100>>>(a_d);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
I then copy the array back to the host for displaying using
    int* a_h[100];
    gpuErrchk( cudaMemcpy(a_h, a_d, 100 * sizeof(int), cudaMemcpyDeviceToHost) );
    for (int i = 0; i < 100; i++)
        printf("%d %d\n", i, a_h[i]);
    system("PAUSE");
}
When I compile it in 32-bit mode it works fine, but in 64-bit mode I get
f[0]=0
f[1]=2
f[2]=4
...
and the second half of the array is undefined.
I tried changing compute_ and sm_ back from 35 to 20, which was the default in 32-bit mode, but to no avail.
I tried adding error checking everywhere, but got no error output, meaning all functions returned success.
I searched, but there doesn't seem to be any particular problem associated with the 64-bit transition. What is it then?
This is not correct:
int* a_h[100];
It should be:
int a_h[100];
You want an array of 100 int values, not an array of 100 int pointers. int values don't change their size between 32-bit and 64-bit builds; int pointers do. That is exactly what produces your symptom: the 400-byte cudaMemcpy fills only half of the 800-byte pointer array, so every other value appears and the second half of the array is undefined.
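To make the size mismatch concrete, a sketch with hypothetical names:

int* p[100];  // on a 64-bit build: 100 * sizeof(int*) = 800 bytes
int  v[100];  // on either build:   100 * sizeof(int)  = 400 bytes
// A cudaMemcpy of 100*sizeof(int) = 400 bytes fills only the first half of p,
// and each 8-byte pointer slot receives two packed ints, which is why every
// other value appears and the second half of the array is undefined.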

Cuda program not working for more than 1024 threads

My program implements odd-even merge sort, and it's not working for more than 1024 threads.
I have already tried increasing the block size to 100, but it still doesn't work for more than 1024 threads.
I'm using Visual Studio 2012 and I have an NVIDIA GeForce 610M. This is my program:
#include<stdio.h>
#include<iostream>
#include<conio.h>
#include <random>
#include <stdint.h>
#include <driver_types.h>
__global__ void odd(int *arr, int n){
    int i = threadIdx.x;
    int temp;
    if (i%2 == 1 && i < n-1){
        if (arr[i] > arr[i+1])
        {
            temp = arr[i];
            arr[i] = arr[i+1];
            arr[i+1] = temp;
        }
    }
}

__global__ void even(int *arr, int n){
    int i = threadIdx.x;
    int temp;
    if (i%2 == 0 && i < n-1){
        if (arr[i] > arr[i+1])
        {
            temp = arr[i];
            arr[i] = arr[i+1];
            arr[i+1] = temp;
        }
    }
}
int main(){
    int SIZE, k, *A, p, j;
    int *d_A;
    float time;
    printf("Enter the size of the array\n");
    scanf("%d", &SIZE);
    A = (int *)malloc(SIZE*sizeof(int));
    cudaMalloc(&d_A, SIZE*sizeof(int));
    for (k = 0; k < SIZE; k++)
        A[k] = rand()%1000;
    cudaMemcpy(d_A, A, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    if (SIZE%2 == 0)
        p = SIZE/2;
    else
        p = SIZE/2 + 1;
    for (j = 0; j < p; j++){
        even<<<3,SIZE>>>(d_A, SIZE);
        if (j != p-1)
            odd<<<3,SIZE>>>(d_A, SIZE);
        if (j == p-1 && SIZE%2 == 0)
            odd<<<1,SIZE>>>(d_A, SIZE);
    }
    cudaMemcpy(A, d_A, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
    for (k = 0; k < SIZE; k++)
        printf("%d ", A[k]);
    free(A);
    cudaFree(d_A);
    getch();
}
CUDA threadblocks are limited to 1024 threads (or 512 threads for cc 1.x GPUs). The size of the threadblock is given by the second kernel configuration parameter in the kernel launch:
even<<<3,SIZE>>>(d_A,SIZE);
^^^^
So when you enter a SIZE value greater than 1024, this kernel will not launch.
You're getting no indication of this because you're not doing proper CUDA error checking, which is always a good idea any time you're having trouble with CUDA code. You can also, as a quick test, run your code with cuda-memcheck to look for API errors. A sketch of a launch configuration that scales past 1024 elements follows.
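A hedged sketch of that configuration (untested; the kernels would also need to derive a global index, as in the last comment line):

// host side: fixed block size, grid size computed from SIZE
int threads = 256;
int blocks = (SIZE + threads - 1) / threads;  // ceiling division
even<<<blocks, threads>>>(d_A, SIZE);
// device side: replace  int i = threadIdx.x;  with a global index:
// int i = blockIdx.x * blockDim.x + threadIdx.x;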
