I'm trying to compile a kernel in CUDA 5 that uses Surface Objects. However, this doesn't seem to work exactly as described in the manual.
__global__ void kernel_reset(cudaSurfaceObject_t surf)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    surf3Dwrite(0u, surf, x * sizeof(unsigned int), y, z, cudaBoundaryModeTrap);
}
This fails to compile with:
error : no instance of overloaded function "surf3Dwrite" matches the argument list
The overload I want is listed in surface_indirect_functions.h as:
static __forceinline__ __device__ void surf3Dwrite(unsigned int data, cudaSurfaceObject_t surfObject, int x, int y, int z, cudaSurfaceBoundaryMode boundaryMode = cudaBoundaryModeTrap)
Can anyone tell me what I'm doing wrong here?
Thanks.
I discovered the cause of this problem.
The code was fine; the problem was in the compiler arguments. Since texture and surface objects require a compute capability of 2.0 or higher, I had to change the NVCC compiler settings from "compute_10,sm_10" to "compute_20,sm_20". This fixed the issue.
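For anyone who runs into the same error, here is a rough host-side sketch of how a surface object for the kernel above can be created and passed in. The array size and launch configuration below are only illustrative, not part of my original code:

// Illustrative host-side setup for kernel_reset (dimensions are arbitrary).
cudaExtent extent = make_cudaExtent(64, 64, 64);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned int>();

// The underlying array must be created with the surface load/store flag.
cudaArray_t arr;
cudaMalloc3DArray(&arr, &desc, extent, cudaArraySurfaceLoadStore);

cudaResourceDesc resDesc = {};
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = arr;

cudaSurfaceObject_t surf = 0;
cudaCreateSurfaceObject(&surf, &resDesc);

dim3 block(8, 8, 8);
dim3 grid(8, 8, 8);
kernel_reset<<<grid, block>>>(surf);

cudaDestroySurfaceObject(surf);
cudaFreeArray(arr);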
Thanks.
I came across some sample code from one of my colleagues in which cudaMemset doesn't seem to work properly when run on a V100.
#include <iostream>
#include <stdio.h>

#define CUDACHECK(cmd) \
{ \
    cudaError_t error = cmd; \
    if (error != cudaSuccess) { \
        fprintf(stderr, "info: '%s'(%d) at %s:%d\n", cudaGetErrorString(error), error, __FILE__, __LINE__); \
    } \
}

__global__ void setValue(int value, int* A_d) {
    int tx = threadIdx.x + blockIdx.x * blockDim.x;
    if (tx == 0) {
        A_d[tx] = A_d[tx] + value;
    }
}

__global__ void printValue(int* A_d) {
    int tx = threadIdx.x + blockIdx.x * blockDim.x;
    if (tx == 0) {
        printf("A_d: %d\n", A_d[tx]);
    }
}

int main(int argc, char* argv[]) {
    int *A_h, *A_d;
    int size = sizeof(int);
    A_h = (int*)malloc(size);
    A_h[0] = 1;
    CUDACHECK(cudaSetDevice(0));
    CUDACHECK(cudaHostRegister(A_h, size, 0));
    CUDACHECK(cudaHostGetDevicePointer((void**)&A_d, A_h, 0));
    setValue<<<64,1,0,0>>>(5, A_d);
    cudaDeviceSynchronize();
    printf("A_h: %d\n", A_h[0]);
    A_h[0] = 100;
    printf("A_h: %d\n", A_h[0]);
    printValue<<<64,1,0,0>>>(A_d);
    cudaDeviceSynchronize();
    CUDACHECK(cudaMemset(A_d, 1, size));
    printf("A_h: %d\n", A_h[0]);
    printValue<<<64,1,0,0>>>(A_d);
    cudaDeviceSynchronize();
    cudaHostUnregister(A_h);
    free(A_h);
}
When this sample is compiled and run, the output is as follows.
/usr/local/cuda-11.0/bin/nvcc memsettest.cu -o test
./test
A_h: 6
A_h: 100
A_d: 100
A_h: 16843009
A_d: 16843009
We expect A_h and A_d to be set to 1 with cudaMemset, but they end up with some huge value, as seen above.
So, is cudaMemset expected to work on the device pointer A_d returned by cudaHostGetDevicePointer?
Is this A_d meant to be used only in kernels?
We also see that cudaMemcpy DtoH or HtoD works on the same device pointer A_d.
Can someone help us understand the correct behavior?
We expect A_h and A_d to be set to 1 with cudaMemset.
You're confused about how cudaMemset works. Conceptually, it is very similar to memset from the C standard library. You should try the same test case with memset and see what it does.
Anyway, cudaMemset takes a pointer, a byte value, and a size in bytes to set, just like memset.
So your cudaMemset command:
CUDACHECK (cudaMemset(A_d, 1, size) );
is setting each byte to 1. Since size is 4, that means you are setting A_d[0] to 0x01010101 (hexadecimal). If you plug that value into the Windows programmer calculator, it is 16843009 in decimal. So everything is working as expected here, from what I can see.
Again, I'm pretty sure you would see the same behavior with memset for the same test case/usage.
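To illustrate, here is a small host-only sketch (not part of your code) showing that plain memset does the same byte-wise fill:

#include <stdio.h>
#include <string.h>

int main(void) {
    int a = 0;
    memset(&a, 1, sizeof(int));              // sets each of the 4 bytes to 0x01
    printf("%d (0x%08X)\n", a, (unsigned)a); // prints 16843009 (0x01010101)
    return 0;
}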
I am using CUDA 5.5, compute capability 3.5, on a GTX 1080 Ti and want to compute this formula:
y = a * a * b / 64 + c * c
Suppose I have these parameters:
a = 5876
b = 0.4474222958088
c = 664
I am computing this both on the GPU and on the CPU, and they give me different, inexact answers:
h_data[0] = 6.822759375000e+05,
h_ref[0] = 6.822760000000e+05,
difference = -6.250000000000e-02
h_data is the CUDA answer, h_ref is the CPU answer. When I plug these into my calculator, the GPU answer is closer to the exact answer, and I suspect this has to do with floating-point precision. My question is: how can I get the CUDA result to match the precision/rounding of the CPU version? If I offset the a parameter by +/-1 the solutions match, but if I offset, say, the c parameter I still get a difference of 1/16.
Here's the working code:
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
__global__ void test_func(float a, float b, int c, int nz, float * __restrict__ d_out)
{
    float *fdes_out = d_out + blockIdx.x * nz;
    float roffout2 = a * a / 64.f;
    //float tmp = fma(roffout2,vel,index*index);
    for (int tid = threadIdx.x; tid < nz; tid += blockDim.x) {
        fdes_out[tid] = roffout2 * b + c * c;
    }
}

int main (int argc, char **argv)
{
    // parameters
    float a = 5876.0f, b = 0.4474222958088f;
    int c = 664;
    int nz = 1;
    float *d_data, *h_data, *h_ref;
    h_data = (float*)malloc(nz*sizeof(float));
    h_ref = (float*)malloc(nz*sizeof(float));
    // CUDA
    cudaMalloc((void**)&d_data, sizeof(float)*nz);
    dim3 nb(1,1,1); dim3 nt(64,1,1);
    test_func <<<nb,nt>>> (a,b,c,nz,d_data);
    cudaMemcpy(h_data, d_data, sizeof(float)*nz, cudaMemcpyDeviceToHost);
    // Reference
    float roffout2 = a * a / 64.f;
    h_ref[0] = roffout2*b + c*c;
    // Compare
    printf("h_data[0] = %1.12e,\nh_ref[0] = %1.12e,\ndifference = %1.12e\n",
           h_data[0], h_ref[0], h_data[0]-h_ref[0]);
    // Free
    free(h_data); free(h_ref);
    cudaFree(d_data);
    return 0;
}
I'm compiling only with the -O3 flag.
This small numerical difference of one single-precision ulp occurs because the CUDA compiler applies FMA-merging by default, whereas the host compiler does not do that. FMA-merging can be turned off by adding the command line flag -fmad=false to the invocation of the CUDA compiler driver nvcc.
FMA-merging is a compiler optimization in which an FMUL and a dependent FADD are transformed into a single fused multiply-add, or FMA, instruction. An FMA instruction computes a*b+c such that the full unrounded product a*b enters into the addition with c before a final rounding is applied to produce the final result.
Usually, this has performance advantages, since a single FMA instruction is executed instead of two instructions (FMUL, FADD), all of which have similar latency. Usually, this also has accuracy advantages, as the use of FMA eliminates one rounding step and guards against subtractive cancellation when a*b and c have opposite signs.
In this case, as noted by OP, the GPU result computed with FMA is slightly more accurate than the host result computed without FMA. Using a higher precision reference, I find that the relative error in the GPU result is -4.21e-8, while the relative error in the host result is 4.95e-8.
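If you want to double-check this on the host, a small sketch like the one below (reusing the values from the question) should reproduce the GPU result by forcing a fused multiply-add with fmaf, assuming the host compiler does not itself contract the first expression into an FMA:

#include <math.h>
#include <stdio.h>

int main(void) {
    float a = 5876.0f, b = 0.4474222958088f;
    int c = 664;
    float roffout2 = a * a / 64.f;
    // Host default: separate multiply and add, each rounded (matches h_ref).
    float no_fma = roffout2 * b + (float)(c * c);
    // Fused multiply-add: single rounding at the end (matches the GPU's h_data).
    float with_fma = fmaf(roffout2, b, (float)(c * c));
    printf("no FMA:   %1.12e\nwith FMA: %1.12e\n", no_fma, with_fma);
    return 0;
}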
I'm calling a CUDA device function from an OpenACC compute region, and I want to specify the number of threads that should go into the CUDA function, but I can't figure out how to control that.
%main.cpp
..
#pragma acc routine vector
extern "C" void CUDA_KERNEL_FUNCTION(double *B, int ldb,const double *A, int lda);
..
#pragma acc parallel loop independent collapse(3) gang vector(128)
for (int i0 = 0; i0 < size0 - 31; i0 += 32)
    for (int i1 = 0; i1 < size1 - 31; i1 += 32)
        for (int i2 = 0; i2 < size2; i2 += 1)
            CUDA_KERNEL_FUNCTION(B, ldb, A, lda);
..
..
%cuda_code.cu
extern "C" __device__ void CUDA_KERNEL_FUNCTION(double *B, int ldb,const double *A, int lda)
{
Num_Threads_gpu = blockDim.x * blockDim.y* blockDim.z;
//Num_Threads_gpu is always 32
}
The compilation is fine, but no matter what vector length I use, the number of threads that go into the CUDA function is always 32. Is there any way to specify that?
I am using "cuda/7.0.28" and "pgi/15.10".
Thanks
Try changing vector(128) to vector_length(128). I think PGI 15.10 supports both syntaxes, but just in case...
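In other words, the directive from your code would become something like:

#pragma acc parallel loop independent collapse(3) gang vector_length(128)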
If that doesn't work, can you please post the compiler output with -Minfo=accel so that we can see what the compiler is doing?
I am reading and testing the examples in the book "CUDA by Example: An Introduction to General-Purpose GPU Programming".
When testing the examples in chapter 7, on texture memory, I realized that access to global memory via the texture cache is much slower than direct access (my NVIDIA GPU is a GeForce GTX 260, compute capability 1.3, and I am using NVIDIA CUDA 4.2):
Time per frame with texture fetch (1D or 2D) for a 256*256 image: 93 ms
Time per frame not using texture (just direct global access) for 256*256: 8.5 ms
I have double-checked the code several times, and I have also been reading the "CUDA C Programming Guide" and the "CUDA C Best Practices Guide" which come with the SDK, but I still do not understand the problem.
As far as I understand, texture memory is just global memory accessed through a specific mechanism that makes it behave like a cache (?). I am wondering whether coalesced access to global memory makes the texture fetch slower, but I cannot be sure.
Does anybody have a similar problem?
(I found links in the NVIDIA forums to a similar problem, but they are no longer available.)
The test code looks like this, including only the relevant parts:
//#define TEXTURE
//#define TEXTURE2
#ifdef TEXTURE
// According to C programming guide, it should be static (3.2.10.1.1)
static texture<float> texConstSrc;
static texture<float> texIn;
static texture<float> texOut;
#endif
__global__ void copy_const_kernel( float *iptr
#ifdef TEXTURE2
                                   ) {
#else
                                   , const float *cptr ) {
#endif
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
#ifdef TEXTURE2
    float c = tex1Dfetch(texConstSrc, offset);
#else
    float c = cptr[offset];
#endif
    if (c != 0) iptr[offset] = c;
}
__global__ void blend_kernel( float *outSrc,
#ifdef TEXTURE
                              bool dstOut ) {
#else
                              const float *inSrc ) {
#endif
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    int left = offset - 1;
    int right = offset + 1;
    if (x == 0) left++;
    if (x == SXRES-1) right--;
    int top = offset - SYRES;
    int bottom = offset + SYRES;
    if (y == 0) top += SYRES;
    if (y == SYRES-1) bottom -= SYRES;
#ifdef TEXTURE
    float t, l, c, r, b;
    if (dstOut) {
        t = tex1Dfetch(texIn, top);
        l = tex1Dfetch(texIn, left);
        c = tex1Dfetch(texIn, offset);
        r = tex1Dfetch(texIn, right);
        b = tex1Dfetch(texIn, bottom);
    } else {
        t = tex1Dfetch(texOut, top);
        l = tex1Dfetch(texOut, left);
        c = tex1Dfetch(texOut, offset);
        r = tex1Dfetch(texOut, right);
        b = tex1Dfetch(texOut, bottom);
    }
    outSrc[offset] = c + SPEED * (t + b + r + l - 4 * c);
#else
    outSrc[offset] = inSrc[offset] + SPEED * ( inSrc[top] +
                     inSrc[bottom] + inSrc[left] + inSrc[right] -
                     inSrc[offset]*4);
#endif
}
// globals needed by the update routine
struct DataBlock {
    unsigned char *output_bitmap;
    float *dev_inSrc;
    float *dev_outSrc;
    float *dev_constSrc;
    cudaEvent_t start, stop;
    float totalTime;
    float frames;
    unsigned size;
    unsigned char *output_host;
};
void anim_gpu( DataBlock *d, int ticks ) {
    checkCudaErrors( cudaEventRecord( d->start, 0 ) );
    dim3 blocks(SXRES/16, SYRES/16);
    dim3 threads(16, 16);
#ifdef TEXTURE
    volatile bool dstOut = true;
#endif
    for (int i = 0; i < 90; i++) {
#ifdef TEXTURE
        float *in, *out;
        if (dstOut) {
            in = d->dev_inSrc;
            out = d->dev_outSrc;
        } else {
            out = d->dev_inSrc;
            in = d->dev_outSrc;
        }
#ifdef TEXTURE2
        copy_const_kernel<<<blocks,threads>>>( in );
#else
        copy_const_kernel<<<blocks,threads>>>( in,
                                               d->dev_constSrc );
#endif
        blend_kernel<<<blocks,threads>>>( out, dstOut );
        dstOut = !dstOut;
#else
        copy_const_kernel<<<blocks,threads>>>( d->dev_inSrc,
                                               d->dev_constSrc );
        blend_kernel<<<blocks,threads>>>( d->dev_outSrc,
                                          d->dev_inSrc );
        swap( d->dev_inSrc, d->dev_outSrc );
#endif
    }
    // Some stuff for the events
    // ...
}
I have been testing the results with nvvp (the NVIDIA Visual Profiler).
The results are quite curious, as they show a lot of texture cache misses (which are probably the cause of the bad performance).
The profiler results also include information that is difficult to interpret, even with the help of the CUPTI User's Guide:
tex_cache_hit: number of texture cache hits (counted for only one SM on compute capability 1.3 devices).
tex_cache_miss: number of texture cache misses (counted for only one SM on compute capability 1.3 devices).
The following are the results for an example of 256*256 without using texture cache (only relevant info is shown):
Name Duration(ns) Grid_Size Block_Size
"copy_const_kernel(...) 22688 16,16,1 16,16,1
"blend_kernel(...)" 51360 16,16,1 16,16,1
Following are the results using 1D texture cache:
Name Duration(ns) Grid_Size Block_Size tex_cache_hit tex_cache_miss
"copy_const_kernel(...)" 147392 16,16,1 16,16,1 0 1024
"blend_kernel(...)" 841728 16,16,1 16,16,1 79 5041
Following are the results using 2D texture cache:
Name Duration(ns) Grid_Size Block_Size tex_cache_hit tex_cache_miss
"copy_const_kernel(...)" 150880 16,16,1 16,16,1 0 1024
"blend_kernel(...)" 872832 16,16,1 16,16,1 2971 2149
These results show several interesting things:
There are no cache hits at all for the "copy_const" function (although ideally the memory is "spatially located", in the sense that each thread accesses memory near the memory accessed by neighbouring threads). I guess this is because the threads within this function do not access memory belonging to other threads, which seems to be what makes the texture cache useful (the "spatially located" concept is quite confusing to me).
There are some cache hits in the 1D case and a lot more in the 2D case for the "blend_kernel" function. I guess this is because, within that function, each thread accesses memory belonging to its neighbouring threads. I cannot understand why there are more hits in 2D than in 1D.
The duration is greater in the texture cases than in the no-texture case (by nearly an order of magnitude), perhaps because of the many texture cache misses.
For the "copy_const" function there are 1024 total accesses for the SM, and 5120 for the "blend_kernel". The 5:1 ratio makes sense, since there are 5 fetches in "blend" and only 1 in "copy_const". Anyway, I cannot understand where these 1024 come from: ideally, the tex_cache_miss/hit events only account for one SM (I have 24 in my GeForce GTX 260) and only count warps (32 threads each). Therefore, I have 256 threads / 32 = 8 warps per block and 256 blocks / 24 = 10 or 11 "iterations" per SM, so I would expect something like 80 or 88 fetches. (Moreover, some other events, such as sm_cta_launched, which is the number of thread blocks per SM and is supposed to be supported on my 1.3 device, are always 0...)
This is the first parallel code in CUDA by Example.
Can anyone explain the kernel call <<< N, 1 >>> to me?
This is the code, with the important points:
#define N 10

__global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x; // this thread handles the data at its thread id
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

int main( void ) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // allocate the memory on the GPU
    // fill the arrays 'a' and 'b' on the CPU
    // copy the arrays 'a' and 'b' to the GPU
    add<<<N,1>>>( dev_a, dev_b, dev_c );
    // copy the array 'c' back from the GPU to the CPU
    // display the results
    // free the memory allocated on the GPU
    return 0;
}
Why does it use <<< N, 1 >>>, meaning N blocks with 1 thread in each block? We could instead write <<< 1, N >>> and use 1 block with N threads in that block, which seems better for optimization.
For this little example, there is no particular reason (as Bart already told you in the comments). But for a larger, more realistic example you should always keep in mind that the number of threads per block is limited. That is, if you use N = 10000, you could not use <<<1,N>>> anymore, but <<<N,1>>> would still work.
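As a sketch (this is not from the book), the usual way to handle a large N is to combine both approaches: launch many blocks of, say, 256 threads each and compute a global index inside the kernel:

#define N 100000

__global__ void add( int *a, int *b, int *c ) {
    // global index across all blocks
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

// launch enough 256-thread blocks to cover all N elements
add<<<(N + 255) / 256, 256>>>( dev_a, dev_b, dev_c );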