About CUDA 1D convolution: how can I do this faster? [closed]

Closed. This question needs details or clarity. It is not currently accepting answers.
Closed 2 years ago.
int threads = 32;
dim3 blocks(250000/31, 129, 50);
coefsize = 129;

__global__ void D_Conv(float *in, float *coef, float *out, int coefsize)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y; // 129
    int k = blockIdx.z; // 50
    if (j < coefsize && i < 250000 && k < 50)
    {
        if (i - j >= 0 && i - j < 250000)
        {
            atomicAdd(&out[k*250000 + i], coef[j] * in[k*250000 + i - j]);
        }
    }
}
Many people recommend doing convolution with an FFT, but in this case the two arrays differ greatly in size (129 vs. 250000), so FFT-based convolution is slower than this method.
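For reference, a minimal sketch of what the FFT route would look like for a single channel with cuFFT (sizes and names here are illustrative, not from the question): zero-pad the signal and the filter to at least 250000 + 129 - 1 samples, forward-transform both, multiply pointwise, and inverse-transform.

#include <cufft.h>

// Pointwise complex multiply with 1/len scaling (cuFFT's inverse transform is unnormalized).
__global__ void pointwise_mul(cufftComplex *a, const cufftComplex *b, int n, float scale)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        cufftComplex x = a[i], y = b[i];
        a[i].x = (x.x * y.x - x.y * y.y) * scale;
        a[i].y = (x.x * y.y + x.y * y.x) * scale;
    }
}

// d_sig and d_coef are device buffers already zero-padded to padded_len samples.
void fft_convolve_one_channel(float *d_sig, float *d_coef, int padded_len)
{
    int nfreq = padded_len / 2 + 1;
    cufftComplex *d_sig_f, *d_coef_f;
    cudaMalloc(&d_sig_f,  nfreq * sizeof(cufftComplex));
    cudaMalloc(&d_coef_f, nfreq * sizeof(cufftComplex));

    cufftHandle fwd, inv;
    cufftPlan1d(&fwd, padded_len, CUFFT_R2C, 1);
    cufftPlan1d(&inv, padded_len, CUFFT_C2R, 1);

    cufftExecR2C(fwd, d_sig,  d_sig_f);
    cufftExecR2C(fwd, d_coef, d_coef_f);
    pointwise_mul<<<(nfreq + 127) / 128, 128>>>(d_sig_f, d_coef_f, nfreq, 1.0f / padded_len);
    cufftExecC2R(inv, d_sig_f, d_sig);   // the full-length result overwrites the padded signal buffer

    cufftDestroy(fwd); cufftDestroy(inv);
    cudaFree(d_sig_f); cudaFree(d_coef_f);
}

Even so, as noted above, the per-channel transform overhead can make this slower than the direct kernel for a 129-tap filter.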

I don't believe atomics should be necessary here. The only thread clashing you would have is in the y dimension, so we can simply reduce your overall grid (in y) and convert the operation to a loop computing a running sum. You have plenty of threads in your grid to saturate any GPU, even without the y dimension.
Here's an example:
$ cat t20.cu
#include <iostream>
#define TOL 0.1
__global__ void D_Conv(float *in, float* coef, float *out, int coefsize)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y; //129
int k = blockIdx.z; //50
if (j < coefsize && i < 250000 && k < 50)
{
if (i - j >= 0 && i - j < 250000)
{
atomicAdd(&out[k*250000 + i], coef[j] * in[k*250000 + i - j]);
}
}
}
__global__ void D_Conv_i(float *in, float* coef, float *out, int coefsize)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
//int j = blockIdx.y; //129
int k = blockIdx.z; //50
if (i < 250000 && k < 50)
{
float s = 0;
for (int j = 0; j < 129; j++)
if (i - j >= 0 && i - j < 250000) s += coef[j] * in[k*250000 + i - j];
out[k*250000 + i] += s;
}
}
int main(){
int num_c = 50;
int csz = 250000;
int coefsize = 129;
int isz = num_c*csz;
int osz = num_c*csz;
float *d_in, *h_in, *d_coef, *h_coef, *d_out, *h_out, *h_out_i;
cudaMalloc(&d_in, isz*sizeof(float));
cudaMalloc(&d_out, osz*sizeof(float));
cudaMalloc(&d_coef, coefsize*sizeof(float));
h_in = new float[isz];
h_out = new float[osz];
h_out_i = new float[osz];
h_coef = new float[coefsize];
cudaMemset(d_out, 0, osz*sizeof(float));
for (int i = 0; i < coefsize; i++) h_coef[i] = i%5;
for (int i = 0; i < isz; i++) h_in[i] = i%4;
cudaMemcpy(d_in, h_in, isz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_coef, h_coef, coefsize*sizeof(float), cudaMemcpyHostToDevice);
int threads = 128;
dim3 blocks((csz+threads-1)/threads, coefsize, num_c);
D_Conv<<<blocks, threads>>>(d_in, d_coef, d_out, coefsize);
cudaMemcpy(h_out, d_out, osz*sizeof(float), cudaMemcpyDeviceToHost);
dim3 blocks2((csz+threads-1)/threads, 1, num_c);
cudaMemset(d_out, 0, osz*sizeof(float));
D_Conv_i<<<blocks2, threads>>>(d_in, d_coef, d_out, coefsize);
cudaMemcpy(h_out_i, d_out, osz*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < osz; i++) if (fabsf(h_out_i[i] - h_out[i]) > TOL) {std::cout << "mismatch at: " << i << " was: " << h_out_i[i] << " should be: " << h_out[i] << std::endl; return 0;}
}
$ nvcc -o t20 t20.cu
$ cuda-memcheck ./t20
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors
$ nvprof ./t20
==14221== NVPROF is profiling process 14221, command: ./t20
==14221== Profiling application: ./t20
==14221== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 53.54% 43.853ms 2 21.926ms 21.863ms 21.989ms [CUDA memcpy DtoH]
26.97% 22.087ms 1 22.087ms 22.087ms 22.087ms D_Conv(float*, float*, float*, int)
17.30% 14.172ms 2 7.0860ms 1.4400us 14.171ms [CUDA memcpy HtoD]
2.04% 1.6702ms 1 1.6702ms 1.6702ms 1.6702ms D_Conv_i(float*, float*, float*, int)
0.14% 118.24us 2 59.122us 56.386us 61.858us [CUDA memset]
API calls: 75.11% 270.97ms 3 90.322ms 189.31us 270.50ms cudaMalloc
23.11% 83.367ms 4 20.842ms 45.694us 44.579ms cudaMemcpy
1.07% 3.8698ms 4 967.45us 449.83us 2.5106ms cuDeviceTotalMem
0.59% 2.1262ms 404 5.2620us 332ns 230.46us cuDeviceGetAttribute
0.06% 223.31us 4 55.828us 47.710us 74.669us cuDeviceGetName
0.03% 98.648us 2 49.324us 31.800us 66.848us cudaMemset
0.02% 86.603us 2 43.301us 13.778us 72.825us cudaLaunchKernel
0.01% 21.169us 4 5.2920us 3.2030us 8.0240us cuDeviceGetPCIBusId
0.00% 11.459us 8 1.4320us 427ns 4.2700us cuDeviceGet
0.00% 3.6360us 3 1.2120us 563ns 1.6820us cuDeviceGetCount
0.00% 2.7220us 4 680ns 520ns 877ns cuDeviceGetUuid
$
(CUDA 11.1U1, Tesla V100)
We can see that the atomic kernel takes over 20 ms, whereas the non-atomic kernel runs in less than 2 ms. Also note that I am running with 128 threads per block rather than 32. I'm not sure why you chose 32; I would aim for 64 threads per block or higher.
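If you are unsure what block size to use, one option (my addition, not part of the original answer) is to let the runtime suggest an occupancy-friendly value and build the grid from that; the names below follow the code above:

// Sketch: query an occupancy-based block size for D_Conv_i and derive the grid from it.
int minGridSize = 0, blockSize = 0;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, D_Conv_i, 0, 0);
// blockSize is usually a multiple of the warp size, typically 128 or larger.
dim3 blocks_occ((csz + blockSize - 1) / blockSize, 1, num_c);
D_Conv_i<<<blocks_occ, blockSize>>>(d_in, d_coef, d_out, coefsize);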
Because the coef array is relatively small, and the access pattern is uniform across the warp, we can take advantage of __constant__ memory for this data. This gives an additional speed-up:
$ cat t20.cu
#include <iostream>
#define TOL 0.1
__global__ void D_Conv(float *in, float* coef, float *out, int coefsize)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y; //129
int k = blockIdx.z; //50
if (j < coefsize && i < 250000 && k < 50)
{
if (i - j >= 0 && i - j < 250000)
{
atomicAdd(&out[k*250000 + i], coef[j] * in[k*250000 + i - j]);
}
}
}
__constant__ float Ccoef[129];
__global__ void D_Conv_i(float *in, float* coef, float *out, int coefsize)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
//int j = blockIdx.y; //129
int k = blockIdx.z; //50
if (i < 250000 && k < 50)
{
float s = 0;
for (int j = 0; j < 129; j++)
if (i - j >= 0 && i - j < 250000) s += Ccoef[j] * in[k*250000 + i - j];
out[k*250000 + i] += s;
}
}
int main(){
int num_c = 50;
int csz = 250000;
int coefsize = 129;
int isz = num_c*csz;
int osz = num_c*csz;
float *d_in, *h_in, *d_coef, *h_coef, *d_out, *h_out, *h_out_i;
cudaMalloc(&d_in, isz*sizeof(float));
cudaMalloc(&d_out, osz*sizeof(float));
cudaMalloc(&d_coef, coefsize*sizeof(float));
h_in = new float[isz];
h_out = new float[osz];
h_out_i = new float[osz];
h_coef = new float[coefsize];
cudaMemset(d_out, 0, osz*sizeof(float));
for (int i = 0; i < coefsize; i++) h_coef[i] = i%5;
for (int i = 0; i < isz; i++) h_in[i] = i%4;
cudaMemcpy(d_in, h_in, isz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_coef, h_coef, coefsize*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(Ccoef, h_coef, coefsize*sizeof(float));
int threads = 128;
dim3 blocks((csz+threads-1)/threads, coefsize, num_c);
D_Conv<<<blocks, threads>>>(d_in, d_coef, d_out, coefsize);
cudaMemcpy(h_out, d_out, osz*sizeof(float), cudaMemcpyDeviceToHost);
dim3 blocks2((csz+threads-1)/threads, 1, num_c);
cudaMemset(d_out, 0, osz*sizeof(float));
D_Conv_i<<<blocks2, threads>>>(d_in, d_coef, d_out, coefsize);
cudaMemcpy(h_out_i, d_out, osz*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < osz; i++) if (fabsf(h_out_i[i] - h_out[i]) > TOL) {std::cout << "mismatch at: " << i << " was: " << h_out_i[i] << " should be: " << h_out[i] << std::endl; return 0;}
}
$ nvcc -o t20 t20.cu
$ cuda-memcheck ./t20
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors
$ nvprof ./t20
==2191== NVPROF is profiling process 2191, command: ./t20
==2191== Profiling application: ./t20
==2191== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 54.38% 44.047ms 2 22.024ms 21.997ms 22.051ms [CUDA memcpy DtoH]
27.25% 22.075ms 1 22.075ms 22.075ms 22.075ms D_Conv(float*, float*, float*, int)
17.15% 13.888ms 3 4.6294ms 1.4720us 13.885ms [CUDA memcpy HtoD]
1.07% 869.88us 1 869.88us 869.88us 869.88us D_Conv_i(float*, float*, float*, int)
0.15% 117.83us 2 58.913us 56.321us 61.505us [CUDA memset]
API calls: 77.28% 307.94ms 3 102.65ms 188.61us 307.49ms cudaMalloc
20.70% 82.467ms 4 20.617ms 48.300us 44.617ms cudaMemcpy
1.27% 5.0520ms 4 1.2630ms 593.63us 3.2465ms cuDeviceTotalMem
0.62% 2.4765ms 404 6.1290us 450ns 261.77us cuDeviceGetAttribute
0.07% 271.54us 4 67.884us 59.173us 88.716us cuDeviceGetName
0.02% 97.041us 2 48.520us 30.831us 66.210us cudaMemset
0.02% 86.276us 2 43.138us 14.800us 71.476us cudaLaunchKernel
0.01% 23.142us 1 23.142us 23.142us 23.142us cudaMemcpyToSymbol
0.01% 21.576us 4 5.3940us 3.0900us 8.4600us cuDeviceGetPCIBusId
0.00% 13.604us 8 1.7000us 667ns 4.4800us cuDeviceGet
0.00% 5.7060us 3 1.9020us 452ns 3.5840us cuDeviceGetCount
0.00% 3.2440us 4 811ns 660ns 1.0340us cuDeviceGetUuid
$
The improved kernel now runs in less than 1 ms, roughly twice as fast as the previous non-atomic kernel and about 25x faster than the original atomic version.
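If you want to confirm such timings without a profiler, a simple approach (my addition, not part of the original answer) is to bracket the launch with CUDA events; the variable names follow the listing above:

// Sketch: time one launch of the improved kernel with CUDA events (result in milliseconds).
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
D_Conv_i<<<blocks2, threads>>>(d_in, d_coef, d_out, coefsize);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);
std::cout << "D_Conv_i: " << ms << " ms" << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);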

Related

Why PyCUDA is faster than C CUDA in this example

I am exploring a move from OpenCL to CUDA, and did a few tests to benchmark the speed of CUDA in various implementations. To my surprise, in the examples below, the PyCUDA implementation is about 20% faster than the C CUDA example.
I read many posts about making a "release build" of CUDA C code. I did try adding -Xptxas -O3 to the makefile, and that really did not make a difference. I also tried adjusting the block size with which the kernel was executed; unfortunately, that did not help improve the speed either.
My questions here are:
What could be the reasons for the speed difference between C CUDA and PyCUDA?
If the "advanced" (for lack of a better word) compilation in PyCUDA is one of the reasons, how can I optimize the compilation of my C CUDA code?
Are there any other ways to improve the speed of C CUDA in this case?
While I appreciate general comments, I am looking for actionable suggestions that I can validate on my machine. Thanks!
import pycuda.autoinit
import pycuda.driver as drv
import numpy as np
from pycuda.compiler import SourceModule
import time

mod = SourceModule(
    """
    __global__ void saxpy(int n, const float a, float *x, float *y)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n){
            y[i] = a * x[i] + y[i];
        }
    }
    """
)

saxpy = mod.get_function("saxpy")
N = 1 << 25
time_elapse = 0.0

for i in range(100):
    # print(i)
    # print(N)
    x = np.ones(N).astype(np.float32)
    y = 2 * np.ones(N).astype(np.float32)
    start = time.time()
    saxpy(
        np.int32(N),
        np.float32(2.0),
        drv.In(x),
        drv.InOut(y),
        block=(512, 1, 1),
        grid=(int(N / 512) + 1, 1),
    )
    time_elapse += (time.time() - start)

print(time_elapse)
print(y[-100:-1])
print(y.sum())
print(N * 4.0)
#include <stdio.h>
#include <time.h>
#define DIM 512
__global__ void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main(int num_iterations)
{
double start;
double cputime;
int N = 1 << 25;
float *x, *y, *d_x, *d_y;
int i, j;
for (j = 0; j < num_iterations; j++)
{
x = (float *)malloc(N * sizeof(float));
y = (float *)malloc(N * sizeof(float));
cudaMalloc(&d_x, N * sizeof(float));
cudaMalloc(&d_y, N * sizeof(float));
for (i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
start = clock();
saxpy<<<(N + DIM) / DIM, DIM>>>(N, 2.0f, d_x, d_y);
cputime += ((double)(clock() - start) / CLOCKS_PER_SEC);
cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
// float maxError = 0.0f;
// for (int i = 0; i < N; i++){
// maxError = max(maxError, abs(y[i] - 4.0f));
// //printf("y[%d]: %f\n", i,y[i]);
// }
// printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
printf("cpu time is %f\n", cputime);
return 0;
}
I saved the above file as cuda_example.cu and compiled it with the following command in a makefile:
nvcc -arch=sm_61 -Xptxas -O3,-v -o main cuda_example.cu
If I execute your CUDA-C code as is, and set num_iterations to 300 like this:
int num_iterations =300;
then the execution of your program takes about 60 s on a GeForce GTX 1650. Your code is extremely inefficient, as you copy data back and forth between the host and the device at every iteration.
So, let's restrict the loop to just the kernel execution:
#include <stdio.h>
#include <time.h>
#define DIM 512
__global__ void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
y[i] = a * x[i] + y[i];
}
int main()
{
double start = clock();
int N = 1 << 25;
float *x, *y, *d_x, *d_y;
int i, j;
int num_iterations = 300;
x = (float *)malloc(N * sizeof(float));
y = (float *)malloc(N * sizeof(float));
cudaMalloc(&d_x, N * sizeof(float));
cudaMalloc(&d_y, N * sizeof(float));
for (i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);
for (j = 0; j < num_iterations; j++){
saxpy<<<(N + DIM) / DIM, DIM>>>(N, 2.0f, d_x, d_y);
cudaDeviceSynchronize();
}
cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
double cputime = ((double)(clock() - start) / CLOCKS_PER_SEC);
printf("cpu time is %f\n", cputime);
return 0;
}
If I do that, the execution time becomes 1.36 seconds. Doing something similar with the PyCUDA code, I got about 19 s of execution time.
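As a rough sanity check (my own estimate, not part of the original answer): saxpy reads x, reads y, and writes y, i.e. 12 bytes of traffic per element per iteration, so the loop is bound by memory bandwidth. Even attributing the entire 1.36 s to the kernel loop gives an effective bandwidth in the right range for a GTX 1650:

#include <stdio.h>

int main(void)
{
    // Figures taken from the code and timing above; everything else is arithmetic.
    double N = 1 << 25;                    // elements per array
    double iters = 300.0;                  // kernel launches
    double seconds = 1.36;                 // reported wall time for the loop version
    double bytes = 3.0 * 4.0 * N * iters;  // read x, read y, write y, 4 bytes each
    printf("effective bandwidth: %.1f GB/s\n", bytes / seconds / 1e9);  // about 89 GB/s
    return 0;
}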

CUDA Stride function is not working

The following code does not work. My expectation is that all the y[i] hold 3 after the kernel function add() is called, but if N >= (1 << 24) - 255, all the y[i] are still 2 (as if the kernel function add() did not run).
#include <iostream>

__global__ void add(int n, int *x, int *y) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) y[i] = x[i] + y[i];
}

int main() {
    int *x, *y, N = (1 << 24) - 255; // 255 wrong / 256 ok
    cudaMallocManaged(&x, N * sizeof(int));
    cudaMallocManaged(&y, N * sizeof(int));
    for (int i = 0; i < N; ++i) { x[i] = 1; y[i] = 2; }
    int sz = 256;
    dim3 blockDim(sz, 1, 1);
    dim3 gridDim((N + sz - 1) / sz, 1, 1);
    add<<<gridDim, blockDim>>>(N, x, y);
    cudaDeviceSynchronize();
    for (int i = 0; i < N; ++i) if (y[i] != 3) std::cout << "error" << std::endl;
    cudaFree(x);
    cudaFree(y);
    return 0;
}
The GPU is a GTX1080Ti and has the following limits:
Maximum number of threads per block: 1024
Max dimension size of a thread block (x,y,z): (1024, 1024, 64)
Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535)
Machine is X86_64 Linux Ubuntu 16.04. Am I doing something wrong here? Please help.
I did not specify -arch= when compiling this, so I ended up with -arch=sm_20, which is the default value. I compiled with -arch=sm_60 and now it is working, since the maximum x dimension of the grid is 2147483647 for compute capability 3.0 or above (for sm_20 it is only 65535, which this launch exceeds).
http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
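Since the kernel already uses a grid-stride loop, another way to make the launch robust on any architecture (my addition, not from the original answer) is to cap the block count and check for launch errors so this kind of failure is not silent:

// Sketch: cap gridDim.x at 65535 (the limit for compute capability < 3.0).
// The grid-stride loop inside add() still covers all N elements.
int sz = 256;
int numBlocks = (N + sz - 1) / sz;
if (numBlocks > 65535) numBlocks = 65535;
add<<<numBlocks, sz>>>(N, x, y);
cudaError_t err = cudaGetLastError();   // an out-of-range grid shows up here as a launch error
if (err != cudaSuccess) std::cout << "launch failed: " << cudaGetErrorString(err) << std::endl;
cudaDeviceSynchronize();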

Incorrect addition of Prime numbers in CUDA [duplicate]

This question already has an answer here:
How to find the sum of array in CUDA by reduction
(1 answer)
Closed 3 years ago.
I used the reduction logic in my code by referring to How to find the sum of array in CUDA by reduction.
But it is giving some errors. I cannot find my mistake; could you please help me out?
Required setup:
1. CUDA toolkit v6.5
2. Graphics: GTX 210 (compute capability 1.2)
3. Visual Studio 2013
#include<stdio.h>
#include<cuda.h>
#include<malloc.h>
#include<conio.h>
#include<time.h>
#include<windows.h>
#define SIZE 10
#define N 100
__global__ void vectoreAdd(int *d_a, int *d_b, int *d_c)
{
__shared__ int sdata[256];
int i = threadIdx.x + (blockIdx.x*blockDim.x);
sdata[threadIdx.x] = d_a[i];
__syncthreads();
if (i<SIZE)
for (i = 2; i<SIZE; i++)
{
int counter = 0;
for (int j = 2; j<d_a[i]; j++)
{
if (d_a[i] % j == 0)
{
counter = 1; break;
}
}
if (counter == 0)
{
d_b[i] = d_a[i];
}
}
// do reduction in shared mem
for (int s = 1; s < blockDim.x; s *= 2)
{
int index = 2 * s * threadIdx.x;;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (threadIdx.x == 0)
atomicAdd(d_c, sdata[0]);
}
}
int main()
{
clock_t tic = clock();
int *a, *b, *summation=0, sum = 0,count=-1; //declare summation as double/long if needed
int *d_a, *d_b, *d_c;
//int blocks, block_size = 512;
int size = N * sizeof(int);
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
summation = (int *)malloc(SIZE*sizeof(int));
cudaMalloc((void**)&d_a, SIZE * sizeof(int));
cudaMalloc((void**)&d_b, SIZE * sizeof(int));
cudaMalloc((void**)&d_c, SIZE * sizeof(int));
for (int i = 1; i<SIZE; i++)
{
a[i] = i;
b[i] = 0;
}
cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, SIZE*sizeof(int), cudaMemcpyHostToDevice);
/*blocks = SIZE / block_size;
if (SIZE% block_size != 0)
blocks++; */
dim3 blocksize(256); // create 1D threadblock
dim3 gridsize(N / blocksize.x); //create 1D grid
vectoreAdd << < gridsize, blocksize >> >(d_a, d_b, d_c);
//cudaThreadSynchronize();
cudaMemcpy(b, d_b, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(summation, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
for (int m = 0; m < SIZE; m++)
{
if (b[m] != 0)
{
printf("\n prime no is:%d", b[m]);
count = count + 1;
}
}
printf("\n\n Total prime no. are: %d", count);
/* for (int j = 1; j<SIZE; j++)
{
sum = sum + b[j];
}*/
printf("\n \nsum of all prime no upto %d is:%d", SIZE, summation);
clock_t toc = clock();
printf("\n\nElapsed: %f seconds\n", (double)(toc - tic) / CLOCKS_PER_SEC);
free(a); free(b); free(summation);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
getchar(); return 0;
}
There are lots of mistakes in your code. This:
cudaMalloc((void**)&d_a, SIZE * sizeof(int));
should be:
cudaMalloc((void**)&d_a, N * sizeof(int)); // or
cudaMalloc((void**)&d_a, size);
using the size you already calculated but never passed. The same applies to the malloc() calls in the host code.
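The answer appears to be cut short; here is my reading of the sizing fix it describes, as a sketch (treating d_c as a single int is my inference from the kernel's atomicAdd into d_c, not something the answer states):

// Sketch: size the host and device buffers consistently (N elements each),
// and give d_c a single int slot for the atomically accumulated sum.
int size = N * sizeof(int);
int *a = (int *)malloc(size);
int *b = (int *)malloc(size);
int *d_a, *d_b, *d_c;
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, sizeof(int));
cudaMemset(d_c, 0, sizeof(int));                  // start the sum at zero
// ... fill a[] and b[] as in the original code ...
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);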

Instructions Per Cycle (IPC) and Instruction Level Parallelism (ILP) in CUDA

I observe that IPC drops as ILP goes up for 32-bit int operations when trying to speed up my cryptographic kernel. The kernel consists of fairly well-unrolled loops of long sequences of ADD and XOR operations, which should have a throughput of 160 ops per 192 cores per cycle on Kepler (GTX Titan/780).
The IPC of my kernel tops out at 3.28, and using ILP even drops it. Apparently ILP fails to help achieve my goal -- fully utilizing the pipeline -- so I wrote some small experiments. I put the code for ILP 4 at the end.
Profiler Measurements
Results are measured on GTX Titan.
cubin outputs are examined to make sure no instructions are eliminated during optimization.
Executed IPC is almost the same as issued IPC, so I just list one of them.
ADD instructions (XORs have identical behavior)
| ILP 1 | ILP 2 | ILP 4 | ILP 8
--------------------------------------------------
IPC | 4.00 | 3.32 | 2.72 | 3.44
--------------------------------------------------
Issue Slot | 99.17% | 59.34% | 48.61% | 61.71%
Utilization | | | |
I expected ILP 2, 4 and 8 to give better performance, but they do not.
Recall that the integer throughput is 160 per SM per cycle, i.e. 5 warp instructions per cycle, and the 4 warp schedulers per SM can dual-issue, so IPC should be able to approach 5. How can I explain what I observed? Why is the issue slot 99% utilized when IPC = 4?
Float / Int ADD instruction mix
If I modify the code for ILP 4 to do two int ADDs and two float ADDs:
IPC: 5.1
Issue slot utilization: 99.12%
Strangely enough, it seems that the warp scheduler does a better job of issuing floating-point operations.
Discussion
Available literature suggests that using ILP helps reach peak performance for floating-point operations. Why doesn't ILP apply to integers? How can I achieve this for integer operations?
My kernel should theoretically do 2.25 integer operations per candidate. This is consistent with what I observed in cuobjdump. There are 2^48 candidates, so the minimum runtime on a GTX Titan should be 2.25 * 2^48 / (2688 * 160/192) / 876 MHz = 322.75 s. Is this estimation reasonable?
The measured performance of my kernel is 523 s. This implies that the integer throughput is only about 160 * 3.28 (measured IPC) / 5 (max IPC).
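As a quick check, the arithmetic in the 322.75 s estimate works out (this verification snippet is mine and uses only the figures quoted in the question):

#include <stdio.h>

int main(void)
{
    double ops   = 2.25 * (double)(1ULL << 48);   // 2.25 integer ops per candidate, 2^48 candidates
    double rate  = 2688.0 * 160.0 / 192.0;        // GTX Titan: 2688 cores at 160/192 int ops per core per cycle
    double clock = 876e6;                         // 876 MHz
    printf("minimum runtime: %.2f s\n", ops / rate / clock);   // prints about 322.75
    return 0;
}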
ILP test code
__device__ int x[10];

__global__ void test(int flag = 0)
{
    int a = x[0], b = x[1], c = x[2], d = x[3];
    int _a = x[4], _b = x[5], _c = x[6], _d = x[7];

    #pragma unroll 128
    for (int i = 0; i < 51200; ++i)
    {
        asm volatile("add.u32 %0, %0, %1;": "+r"(a): "r"(_a));
        asm volatile("add.u32 %0, %0, %1;": "+r"(b): "r"(_b));
        asm volatile("add.u32 %0, %0, %1;": "+r"(c): "r"(_c));
        asm volatile("add.u32 %0, %0, %1;": "+r"(d): "r"(_d));
    }

    int v = a + b + c + d;
    if (flag * v == 1)
        x[0] = v;
}
Code fragment for 4 candidates
Each candidate takes 9 / 4 = 2.25 ops. Cuobjdump also verifies this.
d ^= d2(1, 3); // d2 is located in constant memory
s ^= d;
t ^= d2(1, 16);
u ^= d2(1, 17);
v ^= some_const;
flag_s = min(flag_s, s); // int min has throughput of 160
flag_t = flag_t || (s == t); // setp.or should be the same
flag_u = flag_u || (s == u);
flag_v = flag_v || (s == v);
I'm providing an answer to remove this question from the unanswered list.
I do not observe a change in executed Instructions Per Cycle (IPC) with Instruction Level Parallelism. Overall, it is difficult to explain the effect observed by the OP without more information than what is provided in the question (for instance, the launch configuration).
In the code below, I'm considering an example using floats, although I have tested the same code with ints without changing the conceptual results. The code implements cyclical Multiply Add (MAD) operations with ILP=1, ILP=2 and ILP=4.
The executed IPC has been the following
ILP     IPC       FLOPs
1       3.924     67108864
2       4.323     67108864
4       4.016     67108864
for N = 8192. The code was compiled with CUDA 8.0 and run on an NVIDIA GT920M. As can be seen, the IPC stays almost constant across the considered values of ILP. The floating-point operation count (FLOPs) estimated by the code, assuming 2 FLOPs per MAD, coincides with the value measured by the Visual Profiler.
THE CODE
#include<stdio.h>
#define N_ITERATIONS 8192
#include "Utilities.cuh"
#include "TimingGPU.cuh"
#define BLOCKSIZE 512
//#define DEBUG
/********************************************************/
/* KERNEL0 - NO INSTRUCTION LEVEL PARALLELISM (ILP = 1) */
/********************************************************/
__global__ void kernel0(float * __restrict__ d_a, const float * __restrict__ d_b, const float * __restrict__ d_c, const int N) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
float a = d_a[tid];
float b = d_b[tid];
float c = d_c[tid];
for (unsigned int i = 0; i < N_ITERATIONS; i++) {
a = a * b + c;
}
d_a[tid] = a;
}
}
/*****************************************************/
/* KERNEL1 - INSTRUCTION LEVEL PARALLELISM (ILP = 2) */
/*****************************************************/
__global__ void kernel1(float * __restrict__ d_a, const float * __restrict__ d_b, const float * __restrict__ d_c, const int N) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N / 2) {
float a1 = d_a[tid];
float b1 = d_b[tid];
float c1 = d_c[tid];
float a2 = d_a[tid + N / 2];
float b2 = d_b[tid + N / 2];
float c2 = d_c[tid + N / 2];
for (unsigned int i = 0; i < N_ITERATIONS; i++) {
a1 = a1 * b1 + c1;
a2 = a2 * b2 + c2;
}
d_a[tid] = a1;
d_a[tid + N / 2] = a2;
}
}
/*****************************************************/
/* KERNEL2 - INSTRUCTION LEVEL PARALLELISM (ILP = 4) */
/*****************************************************/
__global__ void kernel2(float * __restrict__ d_a, const float * __restrict__ d_b, const float * __restrict__ d_c, const int N) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N / 4) {
float a1 = d_a[tid];
float b1 = d_b[tid];
float c1 = d_c[tid];
float a2 = d_a[tid + N / 4];
float b2 = d_b[tid + N / 4];
float c2 = d_c[tid + N / 4];
float a3 = d_a[tid + N / 2];
float b3 = d_b[tid + N / 2];
float c3 = d_c[tid + N / 2];
float a4 = d_a[tid + 3 * N / 4];
float b4 = d_b[tid + 3 * N / 4];
float c4 = d_c[tid + 3 * N / 4];
for (unsigned int i = 0; i < N_ITERATIONS; i++) {
a1 = a1 * b1 + c1;
a2 = a2 * b2 + c2;
a3 = a3 * b3 + c3;
a4 = a4 * b4 + c4;
}
d_a[tid] = a1;
d_a[tid + N / 4] = a2;
d_a[tid + N / 2] = a3;
d_a[tid + 3 * N / 4] = a4;
}
}
/********/
/* MAIN */
/********/
int main() {
//const int N = 8192 * 64;
const int N = 8192;
//const int N = 1024;
TimingGPU timerGPU;
float *h_a = (float*)malloc(N*sizeof(float));
float *h_a_result_host = (float*)malloc(N*sizeof(float));
float *h_a_result_device = (float*)malloc(N*sizeof(float));
float *h_b = (float*)malloc(N*sizeof(float));
float *h_c = (float*)malloc(N*sizeof(float));
for (int i = 0; i<N; i++) {
h_a[i] = 2.;
h_b[i] = 1.;
h_c[i] = 2.;
h_a_result_host[i] = h_a[i];
for (unsigned int k = 0; k < N_ITERATIONS; k++) {
h_a_result_host[i] = h_a_result_host[i] * h_b[i] + h_c[i];
}
}
float *d_a; gpuErrchk(cudaMalloc((void**)&d_a, N*sizeof(float)));
float *d_b; gpuErrchk(cudaMalloc((void**)&d_b, N*sizeof(float)));
float *d_c; gpuErrchk(cudaMalloc((void**)&d_c, N*sizeof(float)));
gpuErrchk(cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_c, h_c, N*sizeof(float), cudaMemcpyHostToDevice));
/***********/
/* KERNEL0 */
/***********/
timerGPU.StartCounter();
kernel0 << <iDivUp(N, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, d_c, N);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// --- Remember: timing is in ms
printf("Number of operations = %f; GFlops = %f\n", (float)N*(float)N_ITERATIONS, (1.e-6)*((float)N*(float)N_ITERATIONS) / timerGPU.GetCounter());
gpuErrchk(cudaMemcpy(h_a_result_device, d_a, N*sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i<N; i++) if (h_a_result_device[i] != h_a_result_host[i]) { printf("Error at i=%i! Host = %f; Device = %f\n", i, h_a_result_host[i], h_a_result_device[i]); return 1; }
/***********/
/* KERNEL1 */
/***********/
gpuErrchk(cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice));
timerGPU.StartCounter();
kernel1 << <iDivUp(N / 2, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, d_c, N);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// --- Remember: timing is in ms
printf("Number of operations = %f; GFlops = %f\n", (float)N*(float)N_ITERATIONS, (1.e-6)*((float)N*(float)N_ITERATIONS) / timerGPU.GetCounter());
gpuErrchk(cudaMemcpy(h_a_result_device, d_a, N*sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i<N; i++) if (h_a_result_device[i] != h_a_result_host[i]) { printf("Error at i=%i! Host = %f; Device = %f\n", i, h_a_result_host[i], h_a_result_device[i]); return 1; }
/***********/
/* KERNEL2 */
/***********/
gpuErrchk(cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice));
timerGPU.StartCounter();
kernel2 << <iDivUp(N / 4, BLOCKSIZE), BLOCKSIZE >> >(d_a, d_b, d_c, N);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
// --- Remember: timing is in ms
printf("Number of operations = %f; GFlops = %f\n", (float)N*(float)N_ITERATIONS, (1.e-6)*((float)N*(float)N_ITERATIONS) / timerGPU.GetCounter());
gpuErrchk(cudaMemcpy(h_a_result_device, d_a, N*sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i<N; i++) if (h_a_result_device[i] != h_a_result_host[i]) { printf("Error at i=%i! Host = %f; Device = %f\n", i, h_a_result_host[i], h_a_result_device[i]); return 1; }
cudaDeviceReset();
return 0;
}

Matrix Multiplication giving wrong output [duplicate]

This question already has an answer here:
Unable to execute device kernel in CUDA
(1 answer)
Closed 7 years ago.
What I am attempting to do is multiply matrix A by matrix B and then, from the product matrix, get the index of the maximum value per column. Unfortunately, only the first 128*128 values of the matrix multiplication are correct, while the others are just garbage. I do not quite understand how this works and would appreciate some guidance.
#include<stdio.h>
#include "cuda.h"
#include<stdlib.h>
#define blockD 32
const int wA = 128;
const int hA = 4096;
const int wB = 4096;
const int hB = wA;
main(void){
void MatrixMultiplication(float *, float *, float *, float *);
int size_A = wA * hA * sizeof(float);
int size_B = wB * hB * sizeof(float);
int size_C = wB * hA * sizeof(float);
int size_max = 2 * wB * sizeof(float);
float *M, *N, *P, *C;
// allocate memory on the CPU
M = (float*)malloc(size_A);
N = (float*)malloc(size_B);
P = (float*)malloc(size_max);
C = (float*)malloc(size_C);
// initialize the matrices
for (int y=0; y < hA; y++) {
for (int x=0; x < wA; x++){
M[y*wA + x] = 32; //x + y*wA;
}
}
for (int y=0; y<hB; y++) {
for (int x=0; x<wB; x++){
N[y*wB + x] = 21; //x + y*wB;
}
}
MatrixMultiplication(M, N, P, C);
//Write
FILE *f1;
int i,j;
f1 = fopen("C.txt","w");
for(i = hA - 2 ; i < hA; i ++){
for(j = 0; j < wB; j++){
fprintf(f1,"%d\t",int(C[i*wB + j]));
}
fprintf(f1,"\n");
}
fclose(f1);
// free the memory allocated on the CPU
free( M );
free( N );
free( P );
free( C );
cudaDeviceReset();
return 0;
}
__device__ void MaxFunction(float* Pd, float* max)
{
int x = (threadIdx.x + blockIdx.x * blockDim.x);
int y = (threadIdx.y + blockIdx.y * blockDim.y);
int k = 0;
int temp = 0; int temp_idx = 0;
for (k = 0; k < wB; ++k) {
if(Pd[x*wB + k] > temp){
temp = Pd[x*wB + k];
temp_idx = x*wB + k;
}
}
max[y*2 + 0] = temp;
max[y*2 + 1] = temp_idx;
}
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, float* max)
{
// declare cache in the shared memory
__shared__ float Mds[blockD][blockD];
__shared__ float Nds[blockD][blockD];
float Pvalue = 0;
// Loop over the Md and Nd block dimension required to compute the Pd element
for (int m = (wA * blockD * blockIdx.y), n = (blockD * blockIdx.x);
m < ((wA * blockD * blockIdx.y)+wA-1);
m += blockD, n += (blockD*hB)){
// collaboratively loading of Md and Nd blocks into shared memory
Mds[threadIdx.y][threadIdx.x] = Md[m + wA * threadIdx.y + threadIdx.x];
Nds[threadIdx.y][threadIdx.x] = Nd[n + wA * threadIdx.y + threadIdx.x];
__syncthreads();
// keep track of the running sum
for (int k = 0; k < blockD; k++)
Pvalue += Mds[threadIdx.y][k] * Nds[k][threadIdx.x];
__syncthreads();
}
// write back to the global memory
int p = hB * blockD * blockIdx.y + blockD * blockIdx.x;
Pd[p + hB * threadIdx.y + threadIdx.x] = Pvalue;
__syncthreads();
MaxFunction(Pd, max);
}
void MatrixMultiplication(float *M, float *N, float *P, float *C) {
int size_A = wA * hA * sizeof(float);
int size_B = wB * hB * sizeof(float);
int size_C = wB * hA * sizeof(float);
int size_max = 2 * wB * sizeof(float);
float *Md, *Nd, *Pd, *max;
// allocate memory on the GPU
cudaMalloc((void**)&Md, size_A);
cudaMalloc((void**)&Nd, size_B);
cudaMalloc((void**)&Pd, size_C);
cudaMalloc((void**)&max, size_max);
// transfer M and N to device memory
cudaMemcpy(Md, M, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(Nd, N, size_B, cudaMemcpyHostToDevice);
// kernel invocation code
dim3 dimBlock(blockD, blockD);
dim3 dimGrid(wA/blockD, hB/blockD);
//Execute Kernel
MatrixMulKernel<<<dimGrid, dimBlock>>>( Md, Nd, Pd, max);
// transfer P from device
cudaMemcpy(P, max, size_max, cudaMemcpyDeviceToHost);
cudaMemcpy(C, Pd, size_C, cudaMemcpyDeviceToHost);
// free the memory allocated on the GPU
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
cudaFree(max);
}
There seems to be more than one problem in your code. One of them is that, in place of this:
dim3 dimGrid(wA/blockD, hB/blockD);
You should have this:
dim3 dimGrid(wB/blockD, hA/blockD);
Ultimately you need one thread in your grid for each output point. Your formulation was giving you a grid of 4 blocks by 4 blocks, whereas you need a grid of 128 blocks by 128 blocks.
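As a side note (my addition, not part of the original answer), if the output dimensions were ever not exact multiples of blockD, the usual formulation rounds the grid up with ceiling division and adds a bounds check in the kernel:

// Sketch: one thread per element of the hA x wB product, with the grid rounded up
// so sizes that are not multiples of blockD are still covered.
dim3 dimBlock(blockD, blockD);
dim3 dimGrid((wB + blockD - 1) / blockD, (hA + blockD - 1) / blockD);
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, max);
// Inside the kernel, a thread whose row >= hA or column >= wB should simply return.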
The other problem I found with your code was in these lines in the kernel:
int p = hB * blockD * blockIdx.y + blockD * blockIdx.x;
Pd[p + hB * threadIdx.y + threadIdx.x] = Pvalue;
They are not indexing properly through the output array. Rather than try to sort it out using your scheme, I used this instead:
Pd[(threadIdx.x + (blockIdx.x * blockDim.x)) + ((threadIdx.y + (blockIdx.y * blockDim.y))*(gridDim.x*blockDim.x))] = Pvalue;
When I made the above two changes to your code, I got what I believe are correct results throughout the array. And it took about 32 seconds on my machine to run it. (Note that I haven't tried fixing your original max-finding code -- see below for a better approach.)
Based on your previous question, you seemed to be concerned about speed. If you want to do a fast matrix multiply, you should use cublas. The following code shows how to use cublas to multiply two ordinary C-style matrices (they don't have to be square). I've also included a column-max finding kernel that will be fast when the number of columns is large (say, over 500 or so; you have 4096 columns in your example). For small numbers of columns, there may be quicker ways to perform this function, but a small number of columns also suggests that the overall problem size may be small, so the speed of this piece of code will not really be an issue.
Here's the code:
#include <stdio.h>
#include <cublas_v2.h>
#define VERBOSE 1
#define nTPB 64
#define ROW_A 4
#define COL_A 4
#define ROW_B COL_A
#define COL_B 4
#define ROW_C ROW_A
#define COL_C COL_B
#define SIZ_A (ROW_A*COL_A)
#define SIZ_B (ROW_B*COL_B)
#define SIZ_C (ROW_C*COL_C)
// error check macros
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
// for CUBLAS V2 API
#define cublasCheckErrors(fn) \
do { \
cublasStatus_t __err = fn; \
if (__err != CUBLAS_STATUS_SUCCESS) { \
fprintf(stderr, "Fatal cublas error: %d (at %s:%d)\n", \
(int)(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
__global__ void col_max(float *mat, float *max, unsigned int *midx, unsigned int rows, unsigned int cols){
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx < cols){
float tempmax = mat[idx];
unsigned int tempmidx = 0;
for (int i = 1; i< rows; i++)
if (mat[idx + (i*cols)] > tempmax){
tempmax = mat[idx + (i*cols)];
tempmidx = i;}
max[idx] = tempmax;
midx[idx] = tempmidx;
}
}
int main(){
float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C, *h_max, *d_max;
unsigned int *h_idx, *d_idx;
h_A = (float *)malloc(SIZ_A*sizeof(float));
if (h_A==0) {printf("malloc fail\n"); return -1;}
h_B = (float *)malloc(SIZ_B*sizeof(float));
if (h_B==0) {printf("malloc fail\n"); return -1;}
h_C = (float *)malloc(SIZ_C*sizeof(float));
if (h_C==0) {printf("malloc fail\n"); return -1;}
h_max = (float *)malloc(COL_C*sizeof(float));
if (h_max==0) {printf("malloc fail\n"); return -1;}
h_idx = (unsigned int*)malloc(COL_C*sizeof(unsigned int));
if (h_idx==0) {printf("malloc fail\n"); return -1;}
cudaMalloc((void **)&d_A, SIZ_A*sizeof(float));
cudaMalloc((void **)&d_B, SIZ_B*sizeof(float));
cudaMalloc((void **)&d_C, SIZ_C*sizeof(float));
cudaMalloc((void **)&d_max, COL_C*sizeof(float));
cudaMalloc((void **)&d_idx, COL_C*sizeof(unsigned int));
cudaCheckErrors("cuda malloc fail");
// initialize data
for (int i=0; i< SIZ_A; i++) h_A[i] = (float)(i+1);
for (int i=0; i< SIZ_B; i++) h_B[i] = (float)(i+2);
cudaMemcpy(d_A, h_A, SIZ_A*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, SIZ_B*sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cuda memcpy 1 fail");
const float alpha = 1.0f;
const float beta = 0.0f;
cublasHandle_t handle;
cublasCheckErrors(cublasCreate(&handle));
// C = A*B
// due to cublas expecting column-major storage, parameters
// are scrambled
cublasCheckErrors(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, COL_B, ROW_A, COL_A, &alpha, d_B, COL_B, d_A, COL_A, &beta, d_C, COL_C));
cudaMemcpy(h_C, d_C, SIZ_C*sizeof(float), cudaMemcpyDeviceToHost);
cudaCheckErrors("cuda memcpy 2 fail");
col_max<<<(COL_C + nTPB - 1)/nTPB, nTPB>>>(d_C, d_max, d_idx, ROW_C, COL_C);
cudaCheckErrors("kernel launch fail");
cudaMemcpy(h_max, d_max, COL_C*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_idx, d_idx, COL_C*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaCheckErrors("cuda memcpy 3 fail/kernel fail");
if (VERBOSE){
printf("A: \n");
for (int i=0; i< ROW_A; i++){
for (int j=0; j< COL_A; j++)
printf("%7.5G", h_A[j+(i*COL_A)]);
printf("\n");}
printf("B: \n");
for (int i=0; i< ROW_B; i++){
for (int j=0; j< COL_B; j++)
printf("%7.5G", h_B[j+(i*COL_B)]);
printf("\n");}
printf("C = A*B: \n");
for (int i=0; i< ROW_C; i++){
for (int j=0; j< COL_C; j++)
printf("%7.5G", h_C[j+(i*COL_C)]);
printf("\n");}
printf("COLUMN MAX:\n");
for (int i=0; i< COL_C; i++)
printf("%7.5G", h_max[i]);
printf("\nCOLUMN MAX IDX:\n");
for (int i=0; i< COL_C; i++)
printf("%7d", h_idx[i]);
}
printf("\n finished!\n");
return 0;
}
Here's what I used to compile:
$ nvcc -arch=sm_20 -O3 -o t221 t221.cu -lcublas
And here's the sample output:
$ cuda-memcheck ./t221
========= CUDA-MEMCHECK
A:
1 2 3 4
5 6 7 8
9 10 11 12
13 14 15 16
B:
2 3 4 5
6 7 8 9
10 11 12 13
14 15 16 17
C = A*B:
100 110 120 130
228 254 280 306
356 398 440 482
484 542 600 658
COLUMN MAX:
484 542 600 658
COLUMN MAX IDX:
3 3 3 3
finished!
========= ERROR SUMMARY: 0 errors
$
When I extended my code to handle the same sizes you indicated, (A = 4096x128, B=128x4096) it took about 1 second on my machine. So it's much faster than your code. However, when I take your code and comment out your call to MaxFunction in the kernel, it also only takes about 1 second to compute the matrix multiply result. So if you wanted to keep your matrix multiply code (i.e. not use cublas) you could break the code into 2 kernels, and use your multiply routine in the first kernel with my max-finding routine (col_max) in the second kernel, and also probably get a pretty fast result.
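If you take that route, the host-side call sequence would look something like this (a sketch; col_max and nTPB come from my code above, the matrix names and sizes from yours, and d_max/d_idx are device buffers you would allocate for the per-column results):

// Sketch: first kernel does only the multiply (with the MaxFunction call removed),
// second kernel finds the per-column maximum of the hA x wB product.
dim3 dimBlock(blockD, blockD);
dim3 dimGrid((wB + blockD - 1) / blockD, (hA + blockD - 1) / blockD);
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, max);              // multiply only
col_max<<<(wB + nTPB - 1) / nTPB, nTPB>>>(Pd, d_max, d_idx, hA, wB);  // per-column max and index
cudaDeviceSynchronize();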
As @talonmies indicated, if you are running on a Windows machine, be sure you are aware of the ramifications of the Windows TDR (Timeout Detection and Recovery) watchdog.