Why does nvcc remove my if branch during compilation? - cuda

I found some strange behaviour when compiling CUDA code to PTX. If a global function takes the return value of tex2DLod<uchar4> and passes it to a device function containing an if-statement whose branches both call a device function taking a uchar4 argument, the resulting PTX file only contains the code from the else branch.
Here is an example. I compiled the following code with both CUDA 10.1 update 1 and update 2; the result is always the same. If I remove the if statement and keep only the else part, the resulting PTX never changes, which means the first branch has been lost.
#include <stdint.h>
#include <cuda.h>

__device__ float3 rgba2rgb(uchar4 p)
{
    return make_float3(p.x/255.0f, p.y/255.0f, p.z/255.0f);
}

__device__ float3 bgra2rgb(uchar4 p)
{
    return make_float3(p.z/255.0f, p.y/255.0f, p.x/255.0f);
}

__device__ float3 pixel2rgb(uchar4 p, bool flag)
{
    if(flag)
    {
        return bgra2rgb(p);
    }
    else
    {
        return rgba2rgb(p);
    }
}

extern "C" __global__ void func2(
    CUtexObject rgb_mip_texture,
    size_t width, size_t height,
    bool flag
)
{
    size_t x_p = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y_p = blockIdx.y * blockDim.y + threadIdx.y;

    if (x_p >= width || y_p >= height)
        return;

    uchar4 pixel = tex2DLod<uchar4>(rgb_mip_texture, x_p, y_p, (float)0);
    //uchar4 pixel = make_uchar4(1, 2, 3, 4);
    float3 rgb = pixel2rgb(pixel, flag);
    printf("rgb=(%f,%f,%f)", rgb.x, rgb.y, rgb.z);
}
The nvcc command is shown below; ccbin is clang 8.0.
/usr/bin/nvcc -ptx \
-v --ptxas-options=-v \
--compiler-options "-v" \
-ccbin "${ccbin}" \
"${input_file}" \
-o "${ptx_file}"
If the pixel does not come from tex2DLod (for example, if it comes from make_uchar4), then both branches are preserved. Is this a known bug in nvcc?

This would appear to be a bug in nvcc 10.1 (the only version I have tested). It appears that the compiler's attempts at automatic inline expansion of the rgba2rgb and bgra2rgb functions break somehow, so that the result of compiling this:
__device__ float3 pixel2rgb(uchar4 p, bool flag)
{
    if(flag)
    {
        return bgra2rgb(p);
    }
    else
    {
        return rgba2rgb(p);
    }
}
is effectively this:
__device__ float3 pixel2rgb(uchar4 p, bool flag)
{
    return rgba2rgb(p);
}
It isn't related to textures per se, because I can reproduce the problem with this code reading directly from global memory:
#include <stdint.h>
#include <cuda.h>
#include <cstdio>

__device__ float3 rgba2rgb(uchar4 p)
{
    return make_float3(p.x/255.0f, p.y/255.0f, p.z/255.0f);
}

__device__ float3 bgra2rgb(uchar4 p)
{
    return make_float3(p.z/255.0f, p.y/255.0f, p.x/255.0f);
}

__device__ float3 pixel2rgb(uchar4 p, bool flag)
{
    if(flag)
    {
        return bgra2rgb(p);
    }
    else
    {
        return rgba2rgb(p);
    }
}

__global__ void func2(
    uchar4* pixels,
    size_t width, size_t height,
    bool flag
)
{
    size_t x_p = blockIdx.x * blockDim.x + threadIdx.x;
    size_t y_p = blockIdx.y * blockDim.y + threadIdx.y;

    if ((x_p < width) && (y_p < height)) {
        size_t idx = x_p * width + y_p;
        uchar4 pixel = pixels[idx];
        float3 rgb = pixel2rgb(pixel, flag);
        printf("flag=%d idx=%ld rgb=(%f,%f,%f)\n", flag, idx, rgb.x, rgb.y, rgb.z);
    }
}

int main()
{
    int width = 2, height = 2;
    uchar4* data;
    cudaMallocManaged(&data, width * height * sizeof(uchar4));
    data[0] = make_uchar4(1, 2, 3, 4);
    data[1] = make_uchar4(2, 3, 4, 5);
    data[2] = make_uchar4(3, 4, 5, 6);
    data[3] = make_uchar4(4, 5, 6, 7);

    dim3 bdim(2,2);
    func2<<<1, bdim>>>(data, width, height, true);
    cudaDeviceSynchronize();
    func2<<<1, bdim>>>(data, width, height, false);
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
$ nvcc -arch=sm_52 -o wangwang wangwang.cu
$ ./wangwang
flag=1 idx=0 rgb=(0.003922,0.007843,0.011765)
flag=1 idx=2 rgb=(0.011765,0.015686,0.019608)
flag=1 idx=1 rgb=(0.007843,0.011765,0.015686)
flag=1 idx=3 rgb=(0.015686,0.019608,0.023529)
flag=0 idx=0 rgb=(0.003922,0.007843,0.011765)
flag=0 idx=2 rgb=(0.011765,0.015686,0.019608)
flag=0 idx=1 rgb=(0.007843,0.011765,0.015686)
flag=0 idx=3 rgb=(0.015686,0.019608,0.023529)
I presume that the make_uchar4 version you mention works because the compiler will pre-compute the results due to the constant inputs and eliminate the conversion function code altogether.
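For illustration, here is a sketch (not the original code) of what the optimizer can effectively reduce the constant-input case to:
// Sketch only: with a compile-time-constant pixel, the compiler can evaluate
// both conversion branches at compile time and keep only a select between
// two constant float3 values, so neither branch has to survive as code.
uchar4 pixel = make_uchar4(1, 2, 3, 4);
float3 rgb = flag ? make_float3(3/255.0f, 2/255.0f, 1/255.0f)   // bgra2rgb(pixel)
                  : make_float3(1/255.0f, 2/255.0f, 3/255.0f);  // rgba2rgb(pixel)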
Playing around, I was able to fix this by changing the code like this:
__device__ __inline__ float3 rgba2rgb(uchar4 p)
{
    return make_float3(p.x/255.0f, p.y/255.0f, p.z/255.0f);
}

__device__ __inline__ float3 bgra2rgb(uchar4 p)
{
    return make_float3(p.z/255.0f, p.y/255.0f, p.x/255.0f);
}
When I do this, the compiler injects some swizzling logic into the inline PTX expansion it generates:
ld.global.v4.u8 {%rs2, %rs3, %rs4, %rs5}, [%rd10];
and.b16 %rs8, %rs1, 255; <---- %rs1 is the input bool
setp.eq.s16 %p4, %rs8, 0;
selp.b16 %rs9, %rs2, %rs4, %p4;
and.b16 %rs10, %rs9, 255;
selp.b16 %rs11, %rs4, %rs2, %p4;
and.b16 %rs12, %rs11, 255;
and things work correctly (your mileage may vary):
$ nvcc -arch=sm_52 -o wangwang wangwang.cu
$ ./wangwang
flag=1 idx=0 rgb=(0.011765,0.007843,0.003922)
flag=1 idx=2 rgb=(0.019608,0.015686,0.011765)
flag=1 idx=1 rgb=(0.015686,0.011765,0.007843)
flag=1 idx=3 rgb=(0.023529,0.019608,0.015686)
flag=0 idx=0 rgb=(0.003922,0.007843,0.011765)
flag=0 idx=2 rgb=(0.011765,0.015686,0.019608)
flag=0 idx=1 rgb=(0.007843,0.011765,0.015686)
flag=0 idx=3 rgb=(0.015686,0.019608,0.023529)
I would report this as a bug to NVIDIA.

Related

Does a bool variable in a kernel need to be synchronized?

I have a kernel consisting of a for loop that searches through an array for a specific int value. I'm using a grid block of 256 threads to do this. However, when one thread finds the value, I want to let the other threads know to exit. Currently I'm using a boolean flag, but I'm not sure if it's working properly. My concern is synchronization.
__device__ bool found;

__global__
void search()
{
    // arr, x and stride are assumed to be defined elsewhere
    for(int i = threadIdx.x; i < 1000000; i += stride)
    {
        if(found == true)
        {
            break;
        }
        else if(arr[i] == x)
        {
            found = true;
            break;
        }
    }
}

int main()
{
    bool flag = false;
    cudaMemcpyToSymbol(found, &flag, sizeof(bool), 0, cudaMemcpyHostToDevice);
}
As pointed out in comments, you can probably achieve what you want by declaring the global device flag to be volatile, which will inhibit caching, and by using a memory fence function. There really isn't a global synchronization primitive which would do what you want, other than the new grid synchronization mechanism introduced in CUDA 9 and new hardware, but that probably isn't necessary in this case. Turning your pseudocode into a toy example:
#include <iostream>
#include <thrust/device_vector.h>

__device__ volatile bool found;
__device__ volatile size_t idx;

template<bool docheck>
__global__
void search(const int* arr, int x, size_t N)
{
    size_t i = threadIdx.x + blockIdx.x * blockDim.x;
    size_t stride = blockDim.x * gridDim.x;

    for(; (i<N) && (!found); i += stride)
    {
        if(arr[i] == x)
        {
            if (docheck) found = true;
            idx = i;
            __threadfence();
            break;
        }
    }
}

int main()
{
    const size_t N = 1 << 24;
    const size_t findidx = 280270;
    const int findval = 0xdeadbeef;

    thrust::device_vector<int> data(N,1);
    data[findidx] = findval;

    bool flag = false;
    size_t zero = 0;

    {
        cudaMemcpyToSymbol(found, &flag, sizeof(bool));
        cudaMemcpyToSymbol(idx, &zero, sizeof(size_t));

        int blocks, threads;
        cudaOccupancyMaxPotentialBlockSize(&blocks, &threads, search<false>);
        search<false><<<blocks, threads>>>(thrust::raw_pointer_cast(data.data()), findval, N);
        cudaDeviceSynchronize();

        size_t result = 0;
        cudaMemcpyFromSymbol(&result, idx, sizeof(size_t));
        std::cout << "result = " << result << std::endl;
    }

    {
        cudaMemcpyToSymbol(found, &flag, sizeof(bool));
        cudaMemcpyToSymbol(idx, &zero, sizeof(size_t));

        int blocks, threads;
        cudaOccupancyMaxPotentialBlockSize(&blocks, &threads, search<true>);
        search<true><<<blocks, threads>>>(thrust::raw_pointer_cast(data.data()), findval, N);
        cudaDeviceSynchronize();

        size_t result = 0;
        cudaMemcpyFromSymbol(&result, idx, sizeof(size_t));
        std::cout << "result = " << result << std::endl;
    }

    return 0;
}
and profiling it gives the following:
$ nvcc -arch=sm_52 -o notify notify.cu
$ nvprof ./notify
==3916== NVPROF is profiling process 3916, command: ./notify
result = 280270
result = 280270
==3916== Profiling application: ./notify
==3916== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 78.00% 1.6773ms 1 1.6773ms 1.6773ms 1.6773ms void search<bool=0>(int const *, int, unsigned long)
19.93% 428.63us 1 428.63us 428.63us 428.63us void thrust::cuda_cub::core::_kernel_agent<thrust::cuda_cub::__parallel_for::ParallelForAgent<thrust::cuda_cub::__uninitialized_fill::functor<thrust::device_ptr<int>, int>, unsigned long>, thrust::cuda_cub::__uninitialized_fill::functor<thrust::device_ptr<int>, int>, unsigned long>(thrust::device_ptr<int>, int)
1.82% 39.199us 1 39.199us 39.199us 39.199us void search<bool=1>(int const *, int, unsigned long)
As you can see, the version which sets the found flag completes the search in 40 microseconds, whereas the version which does not set the flag takes 1.7 milliseconds. Given that the kernel is run with the maximum number of resident blocks in both cases, we can conclude that the early exit mechanism worked correctly and running blocks detected that the required value had been found.

CUDA (from C++) Hyperbolic Trig Functions Calculate Different Results in Different Locations

I'm running into an odd issue with a simulation I wrote. I recently restructured my code to make things cleaner and more organized. Basically (among other things) I moved (basically copy-pasted) the CUDA function in question to another file. This function uses asinh to compute something, as well as sinh and cosh. What I've noticed is that before the move, the function produced expected results consistent with hand-calculated values (in Excel). After the move, the hyperbolic functions are fed the same inputs, yet the results are significantly different (up to 10% in asinh, 0.5% in sinh). This effectively breaks my simulation. I am confident in the rest of the function.
EDIT:
Upon further testing, I've found that hard-coding values for the angle (lambdaDegrees) in question - namely double x{ asinh(sqrt(3.0) * sin(lambdaDegrees * 3.1415927 / 180.0)) }; - produces the (good) expected results. Measuring the angle before and after the equation is executed, the angle is unchanged, yet without hard-coding the value it produces the wrong results. The weirdest part is that simply adding another diagnostic printf caused the function to produce yet another (wrong) result. I'm wondering if it has anything to do with the way I've set up a callback function on the GPU...maybe multiple threads using the function at the same time leading to some (consistent) undefined behavior?
After a bit of screwing around with the code, I reproduced the error. The expected value of x within getSAtLambda (the printf statement) is 1.268...; the result is 1.768... Let me know what you think.
main.cu
//CUDA includes
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"

typedef double(*callbackFcn)(double*, int, double, double, int);

//on GPU global variables
extern __device__ double* fieldConstArray_GPU;
extern __device__ int arraySize_GPU;
extern __device__ callbackFcn callback_GPU;

__host__ __device__ double BFieldAtS(double* consts, int arrayLength, double s, double simtime, int thdInd);
__host__ __device__ double gradBAtS(double* consts, int arrayLength, double s, double simtime, int thdInd);
__global__ void setupEnvironmentGPU(double* constArrayPtr);

__global__ void execute()
{
    int thdInd{ blockIdx.x * blockDim.x + threadIdx.x };
    callback_GPU(fieldConstArray_GPU, arraySize_GPU, (thdInd == 31487) ? 1233005.097 : ((115200 - thdInd) / 50000.0 * 6.371e6), 0.0, thdInd ); //3rd argument are example values
}

void setupEnvironment()
{// consts: [ B0, ILATDeg, L, L_norm, s_max ]
    double fieldConstArray_h[]{ 3.12e-5, 72.0, 66717978.17, 10.47213595, 85670894.1 };
    double* fieldConstants_d{ nullptr };

    cudaMalloc((void **)&fieldConstants_d, 5 * sizeof(double));
    cudaMemcpy(fieldConstants_d, fieldConstArray_h, 5 * sizeof(double), cudaMemcpyHostToDevice);
    setupEnvironmentGPU <<< 1, 1 >>> (fieldConstants_d);
}

int main()
{
    setupEnvironment();

    int loops{ 0 };
    while (loops < 3)
    {
        execute <<< 115200 / 256, 256 >>> ();
        cudaDeviceSynchronize();
        loops++;
    }

    return 0;
}
otherfunctions.cu
#include <cmath>
#include <iostream>

//CUDA includes
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"

typedef double(*callbackFcn)(double*, int, double, double, int);

__device__ double* fieldConstArray_GPU{ nullptr };
__device__ int arraySize_GPU{ 7 };
__device__ callbackFcn callback_GPU{ nullptr };

__host__ __device__ double getSAtLambda(double* consts, int arrayLength, double lambdaDegrees, double simtime, int thdInd)
{//returns s in units of L
    double x{ asinh(sqrt(3.0) * sin(lambdaDegrees * 3.1415927 / 180.0)) };

    if (simtime == 0.0 && thdInd == 31487) { printf("\n\ngetSAtLambda: %f, %f\n\n", lambdaDegrees, x); }

    return (0.5 * consts[2] / sqrt(3.0)) * (x + sinh(x) * cosh(x));
}

__host__ __device__ double getLambdaAtS(double* consts, int arrayLength, double s, double simtime, int thdInd)
{// consts: [ B0, ILATDeg, L, L_norm, s_max, ds, errorTolerance ]
    double lambda_tmp{ (-consts[1] / consts[4]) * s + consts[1] }; //-ILAT / s_max * s + ILAT
    double s_tmp{ consts[4] - getSAtLambda(consts, arrayLength, lambda_tmp, simtime, thdInd) };
    double dlambda{ 1.0 };
    bool over{ 0 };

    while (abs((s_tmp - s) / s) > 1e-4) //errorTolerance
    {
        while (1)
        {
            over = (s_tmp >= s);
            if (over)
            {
                lambda_tmp += dlambda;
                s_tmp = consts[4] - getSAtLambda(consts, arrayLength, lambda_tmp, simtime, 0);
                if (s_tmp < s)
                    break;
            }
            else
            {
                lambda_tmp -= dlambda;
                s_tmp = consts[4] - getSAtLambda(consts, arrayLength, lambda_tmp, simtime, 0);
                if (s_tmp >= s)
                    break;
            }
        }

        if (dlambda < 1e-4 / 100.0) //errorTolerance
            break;

        dlambda /= 5.0; //through trial and error, this reduces the number of calculations usually (compared with 2, 2.5, 3, 4, 10)
    }

    return lambda_tmp;
}

__host__ __device__ double BFieldAtS(double* consts, int arrayLength, double s, double simtime, int thdInd)
{// consts: [ B0, ILATDeg, L, L_norm, s_max, ds, errorTolerance ]
    double lambda_deg{ getLambdaAtS(consts, arrayLength, s, simtime, thdInd) };
    double lambda_rad{ lambda_deg * 3.1415927 / 180.0 };
    double rnorm{ consts[3] * pow(cos(lambda_rad), 2) };

    return -consts[0] / pow(rnorm, 3) * sqrt(1.0 + 3 * pow(sin(lambda_rad), 2));
}

__host__ __device__ double gradBAtS(double* consts, int arrayLength, double s, double simtime, int thdInd)
{
    return (BFieldAtS(consts, arrayLength, s + consts[5], simtime, thdInd) - BFieldAtS(consts, arrayLength, s - consts[5], simtime, thdInd)) / (2 * consts[5]);
}

__global__ void setupEnvironmentGPU(double* constArrayPtr)
{
    callback_GPU = gradBAtS; //sets pointer to callback function
    arraySize_GPU = 7;
    fieldConstArray_GPU = constArrayPtr;
}
A summary of my findings:
On CUDA 8.0, correct results are produced when the code above:
- is compiled as debug instead of release (except for -O1)
- uses a trig identity for asinh instead of the actual asinh function (see the sketch below)
- has the argument for asinh hardcoded
- is built with -O1 instead of -O2, for both release and debug
- (paradoxically) calls getSAtLambda directly instead of through a function pointer
Incorrect results are produced for asinh(x) when the code is compiled as release with -O2, with a non-hardcoded argument, through a function pointer.
Updating to CUDA 9.1 fixed the issue.
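For reference, the trig-identity workaround mentioned above can be sketched as follows. This is not the original code; it simply replaces asinh(z) in getSAtLambda with the mathematically equivalent log(z + sqrt(z*z + 1)):
__host__ __device__ double getSAtLambda(double* consts, int arrayLength, double lambdaDegrees, double simtime, int thdInd)
{//returns s in units of L
    // identity: asinh(z) = log(z + sqrt(z*z + 1))
    double z{ sqrt(3.0) * sin(lambdaDegrees * 3.1415927 / 180.0) };
    double x{ log(z + sqrt(z * z + 1.0)) };
    return (0.5 * consts[2] / sqrt(3.0)) * (x + sinh(x) * cosh(x));
}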

is there a way to do "saypx" in cuBLAS?

cublasSaxpy computes y' = a * x + y, where x and y are vectors and a is scalar.
It turns out I need to compute y' = a * y + x instead. I'm not seeing how to twist the cuBLAS library into doing that.
(Of course, I could compute y' = a * y, then y' = y' + x, but y' is read too often in that case. And I could write my own CUDA code to do it, but then it's likely not anywhere near as fast as the cuBLAS code. I'm just surprised there's no apparent way to do "saypx" directly.)
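For reference, the two-call alternative mentioned above would look roughly like this with cuBLAS (a sketch; handle, n, a, x and y are assumed to be set up already, with host pointer mode):
// y = a * y, then y = 1.0f * x + y, i.e. y' = a * y + x at the cost of two passes over y
cublasSscal(handle, n, &a, y, 1);
const float one = 1.0f;
cublasSaxpy(handle, n, &one, x, 1, y, 1);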
[Added] There are functions similar to "saxpby" in Intel's version of cblas, which would do what I need. But oddly enough, that's not in cuBLAS.
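(For comparison, the Intel extension referred to here is cblas_saxpby, which computes y = a*x + b*y; the call below is a sketch from memory, so check the MKL documentation for the exact prototype.)
// y' = a * y + x expressed as y := 1.0f * x + a * y
cblas_saxpby(n, 1.0f, x, 1, a, y, 1);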
[Added #2] It looks like I can use the cudnnAddTensor function, with some aliasing of descriptors (I have a FilterDescriptor that points to the tensor, which AddTensor won't accept, but I should be able to alias a TensorDescriptor to the same memory and shape.)
There isn't a way I am aware of to do what you are asking in CUBLAS, nor in standard BLAS. What you have found in MKL is an extension added by Intel, but I don't recall seeing something similar in other host and accelerator BLAS implementations.
The good news is that your assertion that "I could write my own CUDA code to do it, but then it's likely not anywhere near as fast as the cuBLAS code" is untrue, at least for an operation as trivial as saxpy. Even a naïve implementation of saxpy will get very close to CUBLAS, because there really aren't that many ways to read two arrays, perform an FMAD and write back the result. As long as you get memory coalescing correct, it is pretty simple to write performant code. For example:
#include <vector>
#include <algorithm>
#include <cassert>
#include <iostream>
#include <cmath>
#include "cublas_v2.h"

typedef enum
{
    AXPY = 0,
    AXPBY = 1
} saxpy_op_t;

__device__ __host__ __inline__
float axpby_op(float y, float x, float a)
{
    return a * y + x;
}

__device__ __host__ __inline__
float axpy_op(float y, float x, float a)
{
    return y + a * x;
}

template<typename T>
class pitched_accessor
{
    T * p;
    size_t pitch;

    public:
    __host__ __device__
    pitched_accessor(T *p_, size_t pitch_) : p(p_), pitch(pitch_) {};

    __host__ __device__
    T& operator[](size_t idx) { return p[pitch*idx]; };

    __host__ __device__
    const T& operator[](size_t idx) const { return p[pitch*idx]; };
};

template<saxpy_op_t op>
__global__
void saxpy_kernel(pitched_accessor<float> y, pitched_accessor<float> x,
                  const float a, const unsigned int N1)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int stride = gridDim.x * blockDim.x;

    #pragma unroll 8
    for(; idx < N1; idx += stride) {
        switch (op) {
            case AXPY:
                y[idx] = axpy_op(y[idx], x[idx], a);
                break;
            case AXPBY:
                y[idx] = axpby_op(y[idx], x[idx], a);
                break;
        }
    }
}

__host__ void saxby(const unsigned int N, const float a,
                    float *x, int xinc, float *y, int yinc)
{
    int gridsize, blocksize;
    cudaOccupancyMaxPotentialBlockSize(&gridsize, &blocksize, saxpy_kernel<AXPBY>);
    saxpy_kernel<AXPBY><<<gridsize, blocksize>>>(pitched_accessor<float>(y, yinc),
                                                 pitched_accessor<float>(x, xinc), a, N);
}

__host__ void saxpy(const unsigned int N, const float a,
                    float *x, int xinc, float *y, int yinc)
{
    int gridsize, blocksize;
    cudaOccupancyMaxPotentialBlockSize(&gridsize, &blocksize, saxpy_kernel<AXPY>);
    saxpy_kernel<AXPY><<<gridsize, blocksize>>>(pitched_accessor<float>(y, yinc),
                                                pitched_accessor<float>(x, xinc), a, N);
}

void check_result(std::vector<float> &yhat, float result, float tolerance=1e-5f)
{
    auto it = yhat.begin();
    for(; it != yhat.end(); ++it) {
        float err = std::fabs(*it - result);
        assert( err < tolerance );
    }
}

int main()
{
    const int N = 1<<22;

    std::vector<float> x_h(N);
    std::vector<float> y_h(N);

    const float a = 2.f, y0 = 1234.f, x0 = 532.f;
    std::fill(y_h.begin(), y_h.end(), y0);
    std::fill(x_h.begin(), x_h.end(), x0);

    float *x_d, *y_d;
    size_t sz = sizeof(float) * size_t(N);
    cudaMalloc((void **)&x_d, sz);
    cudaMalloc((void **)&y_d, sz);
    cudaMemcpy(x_d, &x_h[0], sz, cudaMemcpyHostToDevice);

    {
        cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice);
        saxby(N, a, x_d, 1, y_d, 1);
        std::vector<float> yhat(N);
        cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost);
        check_result(yhat, axpby_op(y0, x0, a));
    }

    {
        cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice);
        saxpy(N, a, x_d, 1, y_d, 1);
        std::vector<float> yhat(N);
        cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost);
        check_result(yhat, axpy_op(y0, x0, a));
    }

    {
        cublasHandle_t handle;
        cublasCreate(&handle);
        cudaMemcpy(y_d, &y_h[0], sz, cudaMemcpyHostToDevice);
        cublasSaxpy(handle, N, &a, x_d, 1, y_d, 1);
        std::vector<float> yhat(N);
        cudaMemcpy(&yhat[0], y_d, sz, cudaMemcpyDeviceToHost);
        check_result(yhat, axpy_op(y0, x0, a));
        cublasDestroy(handle);
    }

    return int(cudaDeviceReset());
}
This demonstrates that a very simple axpy kernel can be easily adapted to perform both the standard operation and the version you want, and run within 10% of the runtime of CUBLAS on the compute 5.2 device I tested it on:
$ nvcc -std=c++11 -arch=sm_52 -Xptxas="-v" -o saxby saxby.cu -lcublas
ptxas info : 0 bytes gmem
ptxas info : Compiling entry function '_Z12saxpy_kernelIL10saxpy_op_t0EEv16pitched_accessorIfES2_fj' for 'sm_52'
ptxas info : Function properties for _Z12saxpy_kernelIL10saxpy_op_t0EEv16pitched_accessorIfES2_fj
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 17 registers, 360 bytes cmem[0]
ptxas info : Compiling entry function '_Z12saxpy_kernelIL10saxpy_op_t1EEv16pitched_accessorIfES2_fj' for 'sm_52'
ptxas info : Function properties for _Z12saxpy_kernelIL10saxpy_op_t1EEv16pitched_accessorIfES2_fj
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 17 registers, 360 bytes cmem[0]
$ nvprof ./saxby
==26806== NVPROF is profiling process 26806, command: ./saxby
==26806== Profiling application: ./saxby
==26806== Profiling result:
Time(%) Time Calls Avg Min Max Name
54.06% 11.190ms 5 2.2381ms 960ns 2.9094ms [CUDA memcpy HtoD]
40.89% 8.4641ms 3 2.8214ms 2.8039ms 2.8310ms [CUDA memcpy DtoH]
1.73% 357.59us 1 357.59us 357.59us 357.59us void saxpy_kernel<saxpy_op_t=1>(pitched_accessor<float>, pitched_accessor<float>, float, unsigned int)
1.72% 355.15us 1 355.15us 355.15us 355.15us void saxpy_kernel<saxpy_op_t=0>(pitched_accessor<float>, pitched_accessor<float>, float, unsigned int)
1.60% 332.21us 1 332.21us 332.21us 332.21us void axpy_kernel_val<float, int=0>(cublasAxpyParamsVal<float>)

OpenACC: calling cuda __device__ kernel from OpenACC parallel loop

If I have a simple test CUDA kernel in a hello.cu file as:
extern "C" __device__ float radians( float f ){
    return f*3.14159265;
}
And a test OpenACC program in mainacc.c:
#include <stdio.h>
#include <stdlib.h>
#define N 10

#pragma acc routine seq
extern float radians( float );

int main() {
    int i;
    float *hptr, *dptr;
    hptr = (float *) calloc(N, sizeof(float));

#pragma acc parallel loop copy(hptr[0:N])
    for(i=0; i<N; i++) {
        hptr[i] = radians(i*0.1f);
    }

    for( i=0; i< N; i++)
        printf("\n %dth value : %f", i, hptr[i]);

    return 0;
}
If I try to compile this code as below I get link time errors:
nvcc hello.cu -c
cc -hacc -hlist=a mainacc.c hello.o
nvlink error : Undefined reference to 'radians' in '/tmp/pe_20271//app_cubin_20271.omainacc_1.o__sec.cubin'
cuda_link: nvlink fatal error
I tried nvcc with the "--relocatable-device-code true" option, etc., but with no success. The loaded modules are:
craype-accel-nvidia35
cudatoolkit/6.5
PrgEnv-cray/5.2.40
Could you tell me the correct way to use a CUDA device kernel within OpenACC?
I've been able to make this sort of mixing work with PGI, but I've not yet been able to produce a sample that works with the Cray compiler. Here's a simple example that works for PGI.
This is the file containing the CUDA.
// saxpy_cuda_device.cu
extern "C"
__device__
float saxpy_dev(float a, float x, float y)
{
    return a * x + y;
}
This is the file containing OpenACC.
// openacc_cuda_device.cpp
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#pragma acc routine seq
extern "C" float saxpy_dev(float, float, float);

int main(int argc, char **argv)
{
    float *x, *y, tmp;
    int n = 1<<20, i;

    x = (float*)malloc(n*sizeof(float));
    y = (float*)malloc(n*sizeof(float));

#pragma acc data create(x[0:n]) copyout(y[0:n])
    {
#pragma acc kernels
        {
            for( i = 0; i < n; i++)
            {
                x[i] = 1.0f;
                y[i] = 0.0f;
            }
        }

#pragma acc parallel loop
        for( i = 0; i < n; i++ )
        {
            y[i] = saxpy_dev(2.0, x[i], y[i]);
        }
    }

    fprintf(stdout, "y[0] = %f\n",y[0]);
    return 0;
}
Below is the compilation command.
$ make
nvcc -rdc true -c saxpy_cuda_device.cu
pgc++ -fast -acc -ta=nvidia:rdc,cuda7.0 -c openacc_cuda_device.cpp
pgc++ -o openacc_cuda_device -fast -acc -ta=nvidia:rdc,cuda7.0 saxpy_cuda_device.o openacc_cuda_device.o -Mcuda
You can use the -Wc command line option to add the generated ptx file to the CUDA link line. I've opened a bug to make sure we document how to do this.
nvcc hello.cu -ptx -arch=sm_35
cc -hacc -hlist=a mainacc.c -Wc,hello.ptx
One suggestion is to provide both a host and device version of the subroutine and then use the "bind" clause to indicate which version to call from a compute region. This will allow you to maintain portability with the host code.
For example:
% cat radians.cu
extern "C" __device__ float cuda_radians( float f ){
    return f*3.14159265;
}

extern "C" float radians( float f ){
    return f*3.14159265;
}

% cat test.c
#include <stdio.h>
#include <stdlib.h>
#define N 10

#pragma acc routine (radians) bind(cuda_radians) seq
extern float radians( float f);

int main() {
    int i;
    float *hptr, *dptr;
    hptr = (float *) calloc(N, sizeof(float));

#pragma acc parallel loop copy(hptr[0:N])
    for(i=0; i<N; i++) {
        hptr[i] = radians(i*0.1f);
    }

    for( i=0; i< N; i++)
        printf("\n %dth value : %f", i, hptr[i]);

    return 0;
}

% nvcc -c radians.cu --relocatable-device-code true
% pgcc -acc -ta=tesla:cuda7.0 -Minfo=accel test.c radians.o -V15.7 -Mcuda
test.c:
main:
15, Generating copy(hptr[:10])
Accelerator kernel generated
Generating Tesla code
16, #pragma acc loop gang, vector(128) /* blockIdx.x threadIdx.x */
% a.out
0th value : 0.000000
1th value : 0.314159
2th value : 0.628319
3th value : 0.942478
4th value : 1.256637
5th value : 1.570796
6th value : 1.884956
7th value : 2.199115
8th value : 2.513274
9th value : 2.827434

Simpson's method to integrate real valued functions with CUDA

I'm trying to code integration by Simpson's method in CUDA.
This is the formula for (composite) Simpson's rule:
I ≈ (h/3) * [ f(x_0) + 4*f(x_1) + 2*f(x_2) + 4*f(x_3) + ... + 2*f(x_{n-2}) + 4*f(x_{n-1}) + f(x_n) ]
where x_k = a + k*h and h = (b - a)/n.
Here's my code
__device__ void initThreadBounds(int *n_start, int *n_end, int n,
                                 int totalBlocks, int blockWidth)
{
    int threadId = blockWidth * blockIdx.x + threadIdx.x;
    int nextThreadId = threadId + 1;
    int threads = blockWidth * totalBlocks;

    *n_start = (threadId * n)/ threads;
    *n_end = (nextThreadId * n)/ threads;
}

__device__ float reg_func (float x)
{
    return x;
}

typedef float (*p_func) (float);

__device__ p_func integrale_f = reg_func;

__device__ void integralSimpsonMethod(int totalBlocks, int totalThreads,
                                      double a, double b, int n, float p_function(float), float* result)
{
    *result = 0;
    float h = (b - a)/n;
    //*result = p_function(a)+p_function(a + h * n);
    //parallel
    int idx_start;
    int idx_end;
    initThreadBounds(&idx_start, &idx_end, n-1, totalBlocks, totalThreads);
    //parallel_ends
    for (int i = idx_start; i < idx_end; i+=2) {
        *result += ( p_function(a + h*(i-1)) +
                     4 * p_function(a + h*(i)) +
                     p_function(a + h*(i+1)) ) * h/3;
    }
}

__global__ void integralSimpson(int totalBlocks, int totalThreads, float* result)
{
    float res = 0;
    integralSimpsonMethod(totalBlocks, totalThreads, 0, 10, 1000, integrale_f, &res);
    result[(blockIdx.x*totalThreads + threadIdx.x)] = res;
    //printf ("Simpson method\n");
}

__host__ void inttest()
{
    const int blocksNum = 32;
    const int threadNum = 32;

    float *device_resultf;
    float host_resultf[threadNum*blocksNum]={0};

    cudaMalloc((void**) &device_resultf, sizeof(float)*threadNum*blocksNum);
    integralSimpson<<<blocksNum, threadNum>>>(blocksNum, threadNum, device_resultf);
    cudaThreadSynchronize();
    cudaMemcpy(host_resultf, device_resultf, sizeof(float) *threadNum*blocksNum,
               cudaMemcpyDeviceToHost);

    float sum = 0;
    for (int i = 0; i != blocksNum*threadNum; ++i) {
        sum += host_resultf[i];
        // printf ("result in %i cell = %f \n", i, host_resultf[i]);
    }
    printf ("sum = %f \n", sum);

    cudaFree(device_resultf);
}

int main(int argc, char* argv[])
{
    inttest();

    int i;
    scanf ("%d",&i);
}
The problem is that it gives the wrong result when n is lower than 100000. For an integral from 0 to 10 the result is ~99, but when n = 100000 or larger it works fine and the result is ~50.
What's wrong, guys?
The basic problem here is that you don't understand your own algorithm.
Your integralSimpsonMethod() function is designed such that each thread samples at least 3 quadrature points per sub-interval in the integration domain. Therefore, if you choose n so that it is less than four times the number of threads in the kernel call, it is inevitable that the sub-intervals will overlap and the resulting integral will be incorrect. You need to make sure that the code checks and scales the thread count or n so that they don't produce overlap when the integral is computed.
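As a sketch of the kind of check described above (the guard and fallback policy here are illustrative, not the original code; integralSimpsonMethod and integrale_f are the ones from the question):
__global__ void integralSimpson(int totalBlocks, int totalThreads, float* result)
{
    float res = 0;
    const int n = 1000;
    // Only run the quadrature when every thread gets a non-overlapping
    // sub-interval (at least four samples per thread, as discussed above);
    // otherwise n must be increased or the launch size reduced.
    if (n >= 4 * totalBlocks * totalThreads) {
        integralSimpsonMethod(totalBlocks, totalThreads, 0, 10, n, integrale_f, &res);
    }
    result[blockIdx.x*totalThreads + threadIdx.x] = res;
}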
If you are doing this for anything other than self-edification, then I recommend you look up the composite version of Simpson's rule. This is much better suited to parallel implementation and will be considerably more performant if implemented correctly.
I would propose an approach to Simpson's integration by using CUDA Thrust. You basically need five steps:
1. Generate the Simpson's quadrature weights;
2. Generate the function sampling points;
3. Generate the function values;
4. Calculate the elementwise product between the quadrature weights and the function values;
5. Sum the above products.
Step #1 requires creating an array with elements repeated many times, namely, 1 4 2 4 2 4 ... 1 for the Simpson's case. This can be accomplished by borrowing Robert Crovella's approach in "cuda thrust library repeat vector multiple times".
Step #2 can be accomplished by using counting_iterators and borrowing talonmies' approach in "Purpose and usage of counting_iterators in CUDA Thrust library".
Step #3 is an application of thrust::transform.
Steps #4 and #5 can be accomplished together by thrust::inner_product.
This approach can be exploited also for use when other quadrature integration rules are of interest.
Here is the code
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>

// for printing
#include <thrust/copy.h>
#include <cstdio>
#include <iostream>
#include <iterator>

#define STRIDE 2
#define N 100

#define pi_f 3.14159265358979f // Greek pi in single precision

struct sin_functor
{
    __host__ __device__
    float operator()(float x) const
    {
        return sin(2.f*pi_f*x);
    }
};

template <typename Iterator>
class strided_range
{
    public:

    typedef typename thrust::iterator_difference<Iterator>::type difference_type;

    struct stride_functor : public thrust::unary_function<difference_type,difference_type>
    {
        difference_type stride;

        stride_functor(difference_type stride)
            : stride(stride) {}

        __host__ __device__
        difference_type operator()(const difference_type& i) const
        {
            return stride * i;
        }
    };

    typedef typename thrust::counting_iterator<difference_type> CountingIterator;
    typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
    typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;

    // type of the strided_range iterator
    typedef PermutationIterator iterator;

    // construct strided_range for the range [first,last)
    strided_range(Iterator first, Iterator last, difference_type stride)
        : first(first), last(last), stride(stride) {}

    iterator begin(void) const
    {
        return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
    }

    iterator end(void) const
    {
        return begin() + ((last - first) + (stride - 1)) / stride;
    }

    protected:
    Iterator first;
    Iterator last;
    difference_type stride;
};

int main(void)
{
    // --- Generate the integration coefficients
    thrust::host_vector<float> h_coefficients(STRIDE);
    h_coefficients[0] = 4.f;
    h_coefficients[1] = 2.f;

    thrust::device_vector<float> d_coefficients(N);

    typedef thrust::device_vector<float>::iterator Iterator;
    strided_range<Iterator> pos1(d_coefficients.begin()+1, d_coefficients.end()-2, STRIDE);
    strided_range<Iterator> pos2(d_coefficients.begin()+2, d_coefficients.end()-1, STRIDE);

    thrust::fill(pos1.begin(), pos1.end(), h_coefficients[0]);
    thrust::fill(pos2.begin(), pos2.end(), h_coefficients[1]);

    d_coefficients[0] = 1.f;
    d_coefficients[N-1] = 1.f;

    // print the generated d_coefficients
    std::cout << "d_coefficients: ";
    thrust::copy(d_coefficients.begin(), d_coefficients.end(), std::ostream_iterator<float>(std::cout, " ")); std::cout << std::endl;

    // --- Generate sampling points
    float a = 0.f;
    float b = .5f;
    float Dx = (b-a)/(float)(N-1);

    thrust::device_vector<float> d_x(N);
    thrust::transform(thrust::make_counting_iterator(a/Dx),
                      thrust::make_counting_iterator((b+1.f)/Dx),
                      thrust::make_constant_iterator(Dx),
                      d_x.begin(),
                      thrust::multiplies<float>());

    // --- Calculate function values
    thrust::device_vector<float> d_y(N);
    thrust::transform(d_x.begin(), d_x.end(), d_y.begin(), sin_functor());

    // --- Calculate integral
    float integral = (Dx/3.f) * thrust::inner_product(d_y.begin(), d_y.begin() + N, d_coefficients.begin(), 0.0f);

    printf("The integral is = %f\n", integral);

    getchar();

    return 0;
}