Simple CUDA Kernel Optimization - cuda

In the process of speeding up an application, I have a very simple kernel which does the type casting as shown below:
__global__ void UChar2FloatKernel(float *out, unsigned char *in, int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < nElem)
        out[i] = (float) in[i];
}
The global memory access is coalesced, and in my understanding using shared memory will not be beneficial either, as there are not multiple reads of the same memory. Does anyone have any idea whether there is any optimization that can be performed to speed up this kernel? The input and output data are already on the device, so no host-to-device memory copy is required.

The single biggest optimisation you can perform on code like this is to use resident threads and increase the number of transactions each thread performs. While the CUDA block scheduling model is pretty lightweight, it isn't free, and launching a lot of blocks containing threads which do only a single memory load and a single memory store will accrue a lot of block scheduling overhead. So only launch as many blocks as will "fill" all the SMs of your GPU, and have each thread do more work.
The second obvious optimization is to switch to 128-byte memory transactions for loads, which should give you a tangible bandwidth utilization gain. On a Fermi or Kepler GPU this won't give as large a performance boost as on first- and second-generation hardware.
Putting this all together into a simple benchmark:
__global__
void UChar2FloatKernel(float *out, unsigned char *in, int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < nElem)
        out[i] = (float) in[i];
}

__global__
void UChar2FloatKernel2(float *out,
                        const unsigned char *in,
                        int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    for (; i < nElem; i += gridDim.x * blockDim.x) {
        out[i] = (float) in[i];
    }
}

__global__
void UChar2FloatKernel3(float4 *out,
                        const uchar4 *in,
                        int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    for (; i < nElem; i += gridDim.x * blockDim.x) {
        uchar4 ival = in[i];  // 32 bit load
        float4 oval = make_float4(ival.x, ival.y, ival.z, ival.w);
        out[i] = oval;        // 128 bit store
    }
}

int main(void)
{
    const int n = 2 << 20;
    unsigned char *a = new unsigned char[n];
    for (int i = 0; i < n; i++) {
        a[i] = i % 255;
    }

    unsigned char *a_;
    cudaMalloc((void **)&a_, sizeof(unsigned char) * size_t(n));
    float *b_;
    cudaMalloc((void **)&b_, sizeof(float) * size_t(n));
    cudaMemset(b_, 0, sizeof(float) * size_t(n)); // warmup

    for (int i = 0; i < 5; i++)
    {
        dim3 blocksize(512);
        dim3 gridsize(n / 512);
        UChar2FloatKernel<<<gridsize, blocksize>>>(b_, a_, n);
    }

    for (int i = 0; i < 5; i++)
    {
        dim3 blocksize(512);
        dim3 gridsize(8); // 4 blocks per SM
        UChar2FloatKernel2<<<gridsize, blocksize>>>(b_, a_, n);
    }

    for (int i = 0; i < 5; i++)
    {
        dim3 blocksize(512);
        dim3 gridsize(8); // 4 blocks per SM
        UChar2FloatKernel3<<<gridsize, blocksize>>>((float4*)b_, (uchar4*)a_, n/4);
    }

    cudaDeviceReset();
    return 0;
}
gives me this on a small Fermi device:
>nvcc -m32 -Xptxas="-v" -arch=sm_21 cast.cu
cast.cu
tmpxft_000014c4_00000000-5_cast.cudafe1.gpu
tmpxft_000014c4_00000000-10_cast.cudafe2.gpu
cast.cu
ptxas : info : 0 bytes gmem
ptxas : info : Compiling entry function '_Z18UChar2FloatKernel2PfPKhi' for 'sm_21'
ptxas : info : Function properties for _Z18UChar2FloatKernel2PfPKhi
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas : info : Used 5 registers, 44 bytes cmem[0]
ptxas : info : Compiling entry function '_Z18UChar2FloatKernel3P6float4PK6uchar4i' for 'sm_21'
ptxas : info : Function properties for _Z18UChar2FloatKernel3P6float4PK6uchar4i
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas : info : Used 8 registers, 44 bytes cmem[0]
ptxas : info : Compiling entry function '_Z17UChar2FloatKernelPfPhi' for 'sm_21'
ptxas : info : Function properties for _Z17UChar2FloatKernelPfPhi
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas : info : Used 3 registers, 44 bytes cmem[0]
tmpxft_000014c4_00000000-5_cast.cudafe1.cpp
tmpxft_000014c4_00000000-15_cast.ii
>nvprof a.exe
======== NVPROF is profiling a.exe...
======== Command: a.exe
======== Profiling result:
Time(%) Time Calls Avg Min Max Name
40.20 6.61ms 5 1.32ms 1.32ms 1.32ms UChar2FloatKernel(float*, unsigned char*, int)
29.43 4.84ms 5 968.32us 966.53us 969.46us UChar2FloatKernel2(float*, unsigned char const *, int)
26.35 4.33ms 5 867.00us 866.26us 868.10us UChar2FloatKernel3(float4*, uchar4 const *, int)
4.02 661.34us 1 661.34us 661.34us 661.34us [CUDA memset]
In the latter two kernels, using only 8 blocks gives a large speedup compared to 4096 blocks, which confirms the idea that multiple work items per thread is the best way to improve performance in this sort of memory-bound, low instruction count kernel.
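If you don't want to hardcode the 8-block grid, a rough sketch of deriving a "fill the SMs" launch configuration from the device properties could look like this; the 4-blocks-per-SM factor is just the heuristic used in the benchmark above, and b_, a_ and n are the benchmark's variables:
// Size the grid from the SM count instead of hardcoding 8 blocks.
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
const int blocksPerSM = 4;   // heuristic, matches the benchmark above
dim3 blocksize(512);
dim3 gridsize(prop.multiProcessorCount * blocksPerSM);
UChar2FloatKernel2<<<gridsize, blocksize>>>(b_, a_, n);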

Here is a CPU version of the function and four GPU kernels. Three of the kernels are from @talonmies' answer, and I have added kernel2, which utilizes vector data types only.
// cpu version for comparison
void UChar2Float(unsigned char *a, float *b, const int n)
{
    for (int i = 0; i < n; i++)
        b[i] = (float)a[i];
}

__global__ void UChar2FloatKernel1(float *out, const unsigned char *in, int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < nElem) out[i] = (float) in[i];
}

__global__ void UChar2FloatKernel2(float4 *out, const uchar4 *in, int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < nElem) {
        uchar4 ival = in[i];  // 32 bit load
        float4 oval = make_float4(ival.x, ival.y, ival.z, ival.w);
        out[i] = oval;        // 128 bit store
    }
}

__global__ void UChar2FloatKernel3(float *out, const unsigned char *in, int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    for (; i < nElem; i += gridDim.x * blockDim.x)
    {
        out[i] = (float) in[i];
    }
}

__global__ void UChar2FloatKernel4(float4 *out, const uchar4 *in, int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    for (; i < nElem; i += gridDim.x * blockDim.x)
    {
        uchar4 ival = in[i];  // 32 bit load
        float4 oval = make_float4(ival.x, ival.y, ival.z, ival.w);
        out[i] = oval;        // 128 bit store
    }
}
On my GeForce GT 640, here are the timing results:
simpleKernel (cpu): 0.101463 seconds.
simpleKernel 1 (gpu): 0.007845 seconds.
simpleKernel 2 (gpu): 0.004914 seconds.
simpleKernel 3 (gpu): 0.005461 seconds.
simpleKernel 4 (gpu): 0.005461 seconds.
So we can see that kernel2, which utilizes vector types only, is the winner. I have done these tests for (32 * 1024 * 768) elements. The nvprof output is also shown below:
Time(%) Time Calls Avg Min Max Name
91.68% 442.45ms 4 110.61ms 107.43ms 119.51ms [CUDA memcpy DtoH]
3.76% 18.125ms 1 18.125ms 18.125ms 18.125ms [CUDA memcpy HtoD]
1.43% 6.8959ms 1 6.8959ms 6.8959ms 6.8959ms UChar2FloatKernel1(float*, unsigned char const *, int)
1.10% 5.3315ms 1 5.3315ms 5.3315ms 5.3315ms UChar2FloatKernel3(float*, unsigned char const *, int)
1.04% 5.0184ms 1 5.0184ms 5.0184ms 5.0184ms UChar2FloatKernel4(float4*, uchar4 const *, int)
0.99% 4.7816ms 1 4.7816ms 4.7816ms 4.7816ms UChar2FloatKernel2(float4*, uchar4 const *, int)

You can decorate the input array with the const __restrict__ qualifiers, which notify the compiler that the data is read-only and not aliased by any other pointer. In this way, the compiler will detect that the access is uniform and can optimise it by using one of the read-only caches (the constant cache or, on compute capability >= 3.5, the read-only data cache, also known as the texture cache).
You can also decorate the output array with the __restrict__ qualifier to suggest other optimizations to the compiler.
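Applied to the kernel from the question, the decorated signature would look something like this (a sketch only; nothing but the qualifiers changes, the body is as before):
// const + __restrict__ on the input marks it read-only and non-aliased,
// so loads can go through the read-only (texture) cache path;
// __restrict__ on the output promises the two pointers don't alias.
__global__ void UChar2FloatKernel(float * __restrict__ out,
                                  const unsigned char * __restrict__ in,
                                  int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < nElem)
        out[i] = (float) in[i];
}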
Finally, the recommendation by DarkZeros is worth following.

You had better write a vectorized version of your code, writing a float4 into out at once.
This should be pretty straightforward if nElem happens to be a multiple of 4; otherwise, you might need to handle the remainder.
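A minimal sketch of that suggestion, with a scalar tail so nElem need not be a multiple of 4 (the kernel name UChar2FloatVec4 is illustrative, not from the original code):
// Each thread converts 4 bytes at a time via uchar4/float4; the last
// (nElem % 4) elements are handled by a scalar remainder path.
__global__ void UChar2FloatVec4(float *out, const unsigned char *in, int nElem)
{
    unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int nVec = nElem / 4;

    if (i < nVec) {
        uchar4 ival = reinterpret_cast<const uchar4 *>(in)[i];       // 32-bit load
        reinterpret_cast<float4 *>(out)[i] =
            make_float4(ival.x, ival.y, ival.z, ival.w);             // 128-bit store
    }

    // Remainder: at most 3 elements, handled by the first few threads.
    int rem = nElem - 4 * nVec;
    if (i < rem)
        out[4 * nVec + i] = (float) in[4 * nVec + i];
}
Launch it with enough threads to cover nElem / 4 elements (and at least one block), since the remainder is handled by the first three threads of the grid.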

Related

Can an array of shorts be passed into a CUDA kernel

I have written a CUDA kernel, and when I copy an array of shorts to device memory and then pass it to the kernel, it doesn't work. The simplified code below expresses my issue.
KernelCaller()
{
    const int size = 1;
    short hostArray[size]{41};

    short* devPointer;
    cudaMalloc((void**)&devPointer, size * sizeof(short));
    cudaMemcpy(devPointer, hostArray, size * sizeof(short), cudaMemcpyHostToDevice);

    cudaKernel<<<1,1>>>(devPointer);
}

__global__
void cudaKernel(short* arr)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    short val = arr[idx];
}
At this point the value of val is 1063714857 and what I want it to be is 41.
I assume the issue is that 41 in hex is 0x29 and the value I have is 0x3F670029, so it looks like it read too many bytes, because the 0x29 is at the beginning. When I switch to an array of floats it works perfectly, but I was trying to save memory. Does CUDA not allow an array of shorts?
I have implemented your code and am getting the expected output.
Here's the code
#include <stdio.h>

__global__ void cudaKernel(short* arr)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    short val = arr[idx];
#if __CUDA_ARCH__ >= 200
    printf("Inside kernel %d\n", val);
#endif
    arr[idx] = val;
}

int main()
{
    const int size = 1;
    short hostArray[size]{41};
    printf("Before kernel call %d\n", hostArray[0]);

    short *devPointer;
    cudaMalloc((void**)&devPointer, size * sizeof(short));
    cudaMemcpy(devPointer, hostArray, size * sizeof(short), cudaMemcpyHostToDevice);

    cudaKernel<<<1,1>>>(devPointer);

    cudaMemcpy(hostArray, devPointer, size * sizeof(short), cudaMemcpyDeviceToHost);
    printf("After kernel call %d\n", hostArray[0]);

    cudaFree(devPointer);
    return 0;
}
And the output is
Before kernel call 41
Inside kernel 41
After kernel call 41
So, yes, we can pass an array of shorts into a CUDA kernel.

Optimizing memory access for complex numbers

I have a kernel that operates on complex numbers, and I am loading the values like this:
thrust::complex<float> x = X[tIdx];
where X is in global memory. When I profile this kernel with nvvp, I find that it is memory bandwidth-limited and the profiler suggests that I improve the memory access pattern:
Global Load L2 Transactions/Access=8, Ideal Transactions/Access=4
The disassembly confirms that this line is indeed split into two 32-bit loads, producing a strided access pattern:
LDG.E R9, [R16];
LDG.E R11, [R16+0x4];
How can I get this to compile into a single 64-bit load?
Potential solutions
I realize this is pretty closely related to this earlier question but the proposed solutions (change the global memory layout or use shared memory) seem less ideal than a 64-bit load.
The NVidia developer blog suggests reinterpret_cast to a vector data type such as float2, but I'm a little hazy about how this fits in with pointer aliasing rules.
I must also confess that this is somewhat of a theoretical question. For this particular kernel, I'm limited by the device memory bandwidth, so halving the number of L2 transactions shouldn't significantly improve the overall performance. But I anticipate working with more complex numbers in the future, and if there's a simple solution then I'd like to start using it now.
The basic problem here is that the compiler seems to need explicit alignment specifications for a type before it will generate vector load and store instructions. Consider the following trivial example:
class __align__(8) cplx0
{
public:
    __device__ __host__ cplx0(float _re, float _img) : re(_re), img(_img) {};
    float re, img;
};

class cplx1
{
public:
    __device__ __host__ cplx1(float _re, float _img) : re(_re), img(_img) {};
    float re, img;
};

template<typename T>
__global__ void memsetkernel(T* out, const T val, int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
#pragma unroll 8
    for(; tid < N; tid += stride) out[tid] = val;
}

template<typename T>
__global__ void memcpykernel(const T* __restrict__ in, T* __restrict__ out, int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
#pragma unroll 8
    for(; tid < N; tid += stride) out[tid] = in[tid];
}

template<typename T>
void memcpy(const T* in, T* out, int Nitems)
{
    int nthreads = 1024;
    int nblocks = 13 * 2; // GTX 970 with 13 SM
    memcpykernel<T><<<nblocks, nthreads>>>(in, out, Nitems);
    cudaDeviceSynchronize();
}

template<typename T>
void memset(T* in, const T value, int Nitems)
{
    int nthreads = 1024;
    int nblocks = 13 * 2; // GTX 970 with 13 SM
    memsetkernel<T><<<nblocks, nthreads>>>(in, value, Nitems);
    cudaDeviceSynchronize();
}

int main(void)
{
    const int Nitems = 1 << 24;

    typedef cplx0 fcomplex0;
    typedef cplx1 fcomplex1;

    {
        fcomplex0* in;
        fcomplex0* out;
        cudaMalloc((void **)&in, Nitems * sizeof(fcomplex0));
        cudaMalloc((void **)&out, Nitems * sizeof(fcomplex1));
        for(int i=0; i<10; i++) {
            memset<fcomplex0>(in, fcomplex0(1.0f,1.0f), Nitems);
            memcpy<fcomplex0>(in, out, Nitems);
        }
        cudaFree(in);
        cudaFree(out);
    }

    {
        fcomplex1* in;
        fcomplex1* out;
        cudaMalloc((void **)&in, Nitems * sizeof(fcomplex1));
        cudaMalloc((void **)&out, Nitems * sizeof(fcomplex1));
        for(int i=0; i<10; i++) {
            memset<fcomplex1>(in, fcomplex1(1.0f,1.0f), Nitems);
            memcpy<fcomplex1>(in, out, Nitems);
            cudaDeviceSynchronize();
        }
        cudaFree(in);
        cudaFree(out);
    }

    cudaDeviceReset();
    return 0;
}
Here we have two home-baked complex types, one with an explicit alignment specification and one without; otherwise they are identical. Putting them through naïve memcpy and memset kernels in this test harness allows us to inspect the code generation behaviour of the toolchain for each type and benchmark the performance.
Firstly, the code. For the cplx0 class, which has explicit 8-byte alignment, the compiler emits vectorized loads and stores in both kernels:
memcpykernel
ld.global.nc.v2.f32 {%f5, %f6}, [%rd17];
st.global.v2.f32 [%rd18], {%f5, %f6};
memsetkernel
st.global.v2.f32 [%rd11], {%f1, %f2};
whereas for the cplx1 case, it does not:
memcpykernel
ld.global.nc.f32 %f1, [%rd16];
ld.global.nc.f32 %f2, [%rd16+4];
st.global.f32 [%rd15+4], %f2;
st.global.f32 [%rd15], %f1;
memsetkernel
st.global.f32 [%rd11+4], %f2;
st.global.f32 [%rd11], %f1;
Looking at performance, there is a non-trivial difference for the memset case (CUDA 8 release toolkit, GTX 970 with Linux 367.48 driver):
$ nvprof ./complex_types
==29074== NVPROF is profiling process 29074, command: ./complex_types
==29074== Profiling application: ./complex_types
==29074== Profiling result:
Time(%) Time Calls Avg Min Max Name
33.04% 19.264ms 10 1.9264ms 1.9238ms 1.9303ms void memcpykernel<cplx1>(cplx1 const *, cplx1*, int)
32.72% 19.080ms 10 1.9080ms 1.9055ms 1.9106ms void memcpykernel<cplx0>(cplx0 const *, cplx0*, int)
19.15% 11.165ms 10 1.1165ms 1.1120ms 1.1217ms void memsetkernel<cplx1>(cplx1*, cplx1, int)
15.09% 8.7985ms 10 879.85us 877.67us 884.13us void memsetkernel<cplx0>(cplx0*, cplx0, int)
The Thrust templated complex type does not have an explicit alignment definition (although it potentially could via specialization, though that would somewhat defeat the purpose). So your only choice here is to either make your own version of the Thrust type with explicit alignment, or use another complex type which does (like the cuComplex type which CUBLAS and CUFFT use).
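As a minimal sketch of the first option, an 8-byte-aligned home-made complex type with the same two-float layout as thrust::complex<float>; the type and kernel names here are illustrative, not part of Thrust or of the code above:
// Same layout as thrust::complex<float>, but explicitly 8-byte aligned
// so the compiler can emit 64-bit vector loads and stores.
struct __align__(8) cplx
{
    float re, img;
};

__global__ void scale_kernel(const cplx* __restrict__ in,
                             cplx* __restrict__ out,
                             float s, int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    for (; tid < N; tid += stride) {
        cplx v = in[tid];      // one 64-bit load
        cplx r;
        r.re  = s * v.re;
        r.img = s * v.img;
        out[tid] = r;          // one 64-bit store
    }
}
Because the layout matches, a device buffer of thrust::complex<float> can be passed to such a kernel with a reinterpret_cast of the pointer, which is essentially the float2 trick from the developer blog mentioned in the question, with the same aliasing caveats.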

Compare Thrust fill with kernel launch speed [duplicate]

I want to add 128-bit vectors with carry. My 128-bit version (addKernel128 in the code below) is twice as slow as the basic 32-bit version (addKernel32 below).
Do I have memory coalescing problems? How can I get better performance?
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#define UADDO(c, a, b) asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
#define UADDC(c, a, b) asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
__global__ void addKernel32(unsigned int *c, const unsigned int *a, const unsigned int *b, const int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void addKernel128(unsigned *c, const unsigned *a, const unsigned *b, const int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size / 4)
{
uint4 a4 = ((const uint4 *)a)[tid],
b4 = ((const uint4 *)b)[tid],
c4;
UADDO(c4.x, a4.x, b4.x)
UADDC(c4.y, a4.y, b4.y) // add with carry
UADDC(c4.z, a4.z, b4.z) // add with carry
UADDC(c4.w, a4.w, b4.w) // add with carry (no overflow checking for clarity)
((uint4 *)c)[tid] = c4;
tid += blockDim.x * gridDim.x;
}
}
int main()
{
const int size = 10000000; // 10 million
unsigned int *d_a, *d_b, *d_c;
cudaMalloc((void**)&d_a, size * sizeof(int));
cudaMalloc((void**)&d_b, size * sizeof(int));
cudaMalloc((void**)&d_c, size * sizeof(int));
cudaMemset(d_a, 1, size * sizeof(int)); // dummy init just for the example
cudaMemset(d_b, 2, size * sizeof(int)); // dummy init just for the example
cudaMemset(d_c, 0, size * sizeof(int));
int nbThreads = 512;
int nbBlocks = 1024; // for example
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float m = 0;
cudaEventElapsedTime(&m, start, stop);
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
cudaDeviceReset();
printf("Elapsed = %g\n", m);
return 0;
}
Timing CUDA code on a WDDM GPU can be quite difficult for a variety of reasons. Most of these revolve around the fact that the GPU is being managed as a display device by Windows, and this can introduce a variety of artifacts into the timing. One example is that the Windows driver and WDDM will batch work for the GPU, and may interleave display work in the middle of CUDA GPU work.
1. If possible, time your CUDA code on Linux, or else on a Windows GPU in TCC mode.
2. For performance, always build without the -G switch. In Visual Studio, this usually corresponds to building the release, not the debug, version of the project.
3. To get a good performance comparison, it's usually advisable to do some "warm up runs" before actually measuring the timing results. These will eliminate "start-up" and other one-time measurement issues, and you are more likely to get sensible results. You may also wish to run your code a number of times and average the results (a sketch follows this list).
4. It's also usually advisable to compile with an arch flag that corresponds to your GPU, for example -arch=sm_20 for a cc2.0 GPU.
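A minimal sketch of point 3's warm-up and averaging advice, written to slot into the question's main() in place of the single timed launch; the kernel, pointers and launch configuration come from the question, while the run counts are arbitrary:
// Warm-up launches so one-time start-up costs don't pollute the measurement.
const int nbWarmup = 3, nbRuns = 10;
for (int i = 0; i < nbWarmup; i++)
    addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);

// Time several runs with CUDA events and report the average.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for (int i = 0; i < nbRuns; i++)
    addKernel128<<<nbBlocks, nbThreads>>>(d_c, d_a, d_b, size);
cudaEventRecord(stop);
cudaEventSynchronize(stop);

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);
printf("Average elapsed = %g ms\n", ms / nbRuns);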

Result of reduction#1 is wrong

I implemented reduction #1 from the well-known slides by Mark Harris, but I obtain 0 as the result. I filled the input array with the same values shown in the slides. I compiled with CUDA 7.0 using the command nvcc reduction1.cu -o red1. Where is the mistake? Thanks.
#include <stdio.h>
#include <cuda_runtime.h>

#define THREADS_PER_BLOCK 16

__global__ void reduce1(int *g_idata, int *g_odata) {
    extern __shared__ int sdata[];

    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();

    // do reduction in shared mem
    for(unsigned int s=1; s < blockDim.x; s *= 2)
    {
        if (tid % (2*s) == 0) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

int main()
{
    int inputLength=16;
    int hostInput[16]={10,1,8,-1,0,-2,3,5,-2,-3,2,7,0,11,0,2};
    int hostOutput=0;
    int *deviceInput;
    int *deviceOutput;

    cudaMalloc((void **)&deviceInput, inputLength * sizeof(int));
    cudaMalloc((void **)&deviceOutput, sizeof(int));
    cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(int), cudaMemcpyHostToDevice);

    reduce1<<<1,THREADS_PER_BLOCK>>>(deviceInput, deviceOutput);
    cudaDeviceSynchronize();

    cudaMemcpy(&hostOutput, deviceOutput, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n",hostOutput);

    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    return 0;
}
As talonmies said, you are using dynamic shared memory, but you are not allocating any memory space for it. You have to specify the size of this memory as the third argument of your kernel execution configuration.
reduce1<<<1, THREADS_PER_BLOCK, 64>>>(deviceInput, deviceOutput);
^^
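For reference, the same launch with the shared-memory size written as an expression rather than the literal 64 bytes; this assumes the single-block launch from the question:
// Third launch parameter = bytes of dynamic shared memory per block,
// one int per thread for the extern __shared__ int sdata[] array.
reduce1<<<1, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(int)>>>(deviceInput, deviceOutput);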
Another way to fix this code is to use static shared memory. Declare your shared memory like this:
__shared__ int sdata[16];
Please read this before asking questions about CUDA.

is there a way to do "saypx" in cuBLAS?

cublasSaxpy computes y' = a * x + y, where x and y are vectors and a is a scalar.
It turns out I need to compute y' = a * y + x instead. I'm not seeing how to twist the cuBLAS library into doing that.
(Of course, I could compute y' = a * y, then y' = y' + x, but y' is read too often in that case. And I could write my own CUDA code to do it, but then it's likely not anywhere near as fast as the cuBLAS code. I'm just surprised there's no apparent way to do "saypx" directly.)
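For what it's worth, a minimal sketch of that two-call fallback with existing cuBLAS entry points (cublasSscal to form a * y in place, then cublasSaxpy with a coefficient of 1 to add x); handle, n, x_d and y_d are placeholders, and the extra pass over y is exactly the cost noted above:
// y := a * y   (one read/write pass over y)
cublasSscal(handle, n, &a, y_d, 1);

// y := 1.0f * x + y   (second pass over y)
const float one = 1.0f;
cublasSaxpy(handle, n, &one, x_d, 1, y_d, 1);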
[Added] There are functions similar to "saxpby" in Intel's version of cblas, which would do what I need. But oddly enough, that's not in cuBLAS.
[Added #2] It looks like I can use the cudnnAddTensor function, with some aliasing of descriptors (I have a FilterDescriptor that points to the tensor, which AddTensor won't accept, but I should be able to alias a TensorDescriptor to the same memory and shape.)
There isn't a way I am aware of to do what you are asking in CUBLAS, nor in standard BLAS. What you have found in MKL is an extension added by Intel, but I don't recall seeing something similar in other host and accelerator BLAS implementations.
The good news is that your assertion that "I could write my own CUDA code to do it, but then it's likely not anywhere near as fast as the cuBLAS code" is untrue, at least for an operation as trivial as saxpy. Even a naïve implementation of saxpy will get very close to CUBLAS, because there really aren't that many ways to read two arrays, perform an FMAD and write back the result. As long as you get memory coalescing correct, it is pretty simple to write performant code. For example:
#include <vector>
#include <algorithm>
#include <cassert>
#include <iostream>
#include <cmath>
#include "cublas_v2.h"

typedef enum
{
    AXPY = 0,
    AXPBY = 1
} saxpy_op_t;

__device__ __host__ __inline__
float axpby_op(float y, float x, float a)
{
    return a * y + x;
}

__device__ __host__ __inline__
float axpy_op(float y, float x, float a)
{
    return y + a * x;
}

template<typename T>
class pitched_accessor
{
    T * p;
    size_t pitch;

public:
    __host__ __device__
    pitched_accessor(T *p_, size_t pitch_) : p(p_), pitch(pitch_) {};

    __host__ __device__
    T& operator[](size_t idx) { return p[pitch*idx]; };

    __host__ __device__
    const T& operator[](size_t idx) const { return p[pitch*idx]; };
};

template<saxpy_op_t op>
__global__
void saxpy_kernel(pitched_accessor<float> y, pitched_accessor<float> x,
                  const float a, const unsigned int N1)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int stride = gridDim.x * blockDim.x;

#pragma unroll 8
    for(; idx < N1; idx += stride) {
        switch (op) {
            case AXPY:
                y[idx] = axpy_op(y[idx], x[idx], a);
                break;
            case AXPBY:
                y[idx] = axpby_op(y[idx], x[idx], a);
                break;
        }
    }
}

__host__ void saxby(const unsigned int N, const float a,
                    float *x, int xinc, float *y, int yinc)
{
    int gridsize, blocksize;
    cudaOccupancyMaxPotentialBlockSize(&gridsize, &blocksize, saxpy_kernel<AXPBY>);
    saxpy_kernel<AXPBY><<<gridsize, blocksize>>>(pitched_accessor<float>(y, yinc),
                                                 pitched_accessor<float>(x, xinc), a, N);
}

__host__ void saxpy(const unsigned int N, const float a,
                    float *x, int xinc, float *y, int yinc)
{
    int gridsize, blocksize;
    cudaOccupancyMaxPotentialBlockSize(&gridsize, &blocksize, saxpy_kernel<AXPY>);
    saxpy_kernel<AXPY><<<gridsize, blocksize>>>(pitched_accessor<float>(y, yinc),
                                                pitched_accessor<float>(x, xinc), a, N);
}

void check_result(std::vector<float> &yhat, float result, float tolerance=1e-5f)
{
    auto it = yhat.begin();
    for(; it != yhat.end(); ++it) {
        float err = std::fabs(*it - result);
        assert( err < tolerance );
    }
}

int main()
{
    const int N = 1<<22;

    std::vector<float> x_h(N);
    std::vector<float> y_h(N);

    const float a = 2.f, y0 = 1234.f, x0 = 532.f;
    std::fill(y_h.begin(), y_h.end(), y0);
    std::fill(x_h.begin(), x_h.end(), x0);

    float *x_d, *y_d;
    size_t sz = sizeof(float) * size_t(N);
    cudaMalloc((void **)&x_d, sz);
    cudaMalloc((void **)&y_d, sz);
    cudaMemcpy(x_d, &x_h[0], sz, cudaMemcpyHostToDevice);

    {
        cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice);
        saxby(N, a, x_d, 1, y_d, 1);
        std::vector<float> yhat(N);
        cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost);
        check_result(yhat, axpby_op(y0, x0, a));
    }

    {
        cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice);
        saxpy(N, a, x_d, 1, y_d, 1);
        std::vector<float> yhat(N);
        cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost);
        check_result(yhat, axpy_op(y0, x0, a));
    }

    {
        cublasHandle_t handle;
        cublasCreate(&handle);
        cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice);
        cublasSaxpy(handle, N, &a, x_d, 1, y_d, 1);
        std::vector<float> yhat(N);
        cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost);
        check_result(yhat, axpy_op(y0, x0, a));
        cublasDestroy(handle);
    }

    return int(cudaDeviceReset());
}
This demonstrates that a very simple axpy kernel can be easily adapted to perform both the standard operation and the version you want, and run within 10% of the runtime of CUBLAS on the compute 5.2 device I tested it on:
$ nvcc -std=c++11 -arch=sm_52 -Xptxas="-v" -o saxby saxby.cu -lcublas
ptxas info : 0 bytes gmem
ptxas info : Compiling entry function '_Z12saxpy_kernelIL10saxpy_op_t0EEv16pitched_accessorIfES2_fj' for 'sm_52'
ptxas info : Function properties for _Z12saxpy_kernelIL10saxpy_op_t0EEv16pitched_accessorIfES2_fj
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 17 registers, 360 bytes cmem[0]
ptxas info : Compiling entry function '_Z12saxpy_kernelIL10saxpy_op_t1EEv16pitched_accessorIfES2_fj' for 'sm_52'
ptxas info : Function properties for _Z12saxpy_kernelIL10saxpy_op_t1EEv16pitched_accessorIfES2_fj
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 17 registers, 360 bytes cmem[0]
$ nvprof ./saxby
==26806== NVPROF is profiling process 26806, command: ./saxby
==26806== Profiling application: ./saxby
==26806== Profiling result:
Time(%) Time Calls Avg Min Max Name
54.06% 11.190ms 5 2.2381ms 960ns 2.9094ms [CUDA memcpy HtoD]
40.89% 8.4641ms 3 2.8214ms 2.8039ms 2.8310ms [CUDA memcpy DtoH]
1.73% 357.59us 1 357.59us 357.59us 357.59us void saxpy_kernel<saxpy_op_t=1>(pitched_accessor<float>, pitched_accessor<float>, float, unsigned int)
1.72% 355.15us 1 355.15us 355.15us 355.15us void saxpy_kernel<saxpy_op_t=0>(pitched_accessor<float>, pitched_accessor<float>, float, unsigned int)
1.60% 332.21us 1 332.21us 332.21us 332.21us void axpy_kernel_val<float, int=0>(cublasAxpyParamsVal<float>)