Compute Capability printf Function [duplicate] - cuda

What do we have to do to use cuPrintf()? (device compute capability 1.2, Ubuntu 12) I couldn't find "cuPrintf.cu" and "cuPrintf.cuh" on my system, so I downloaded their code and included them:
#include "cuPrintf.cuh"
#include "cuPrintf.cu"
By the way this is the rest of the code:
__global__ void hello_kernel (float f) {
    printf ("Thread number %d. f = %d\n", threadIdx.x, f);
}

int main () {
    dim3 gridSize = dim3 (1);
    dim3 blockSize = dim3 (16);
    cudaPrintfInit ();
    hello_kernel <<< gridSize, blockSize >>> (1.2345f);
    cudaPrintfDisplay (stdout, true);
    cudaPrintfEnd ();
    return (0);
}
But nvcc still gives an error:
max@max-Lenovo-G560:~/CUDA/matrixMult$ nvcc printfTest.cu -o printfTest
printfTest.cu(5): error: calling a __host__ function("printf") from a __global__
function("hello_kernel") is not allowed
Thanks!

In your kernel instead of this:
printf ("Thread number %d. f = %d\n", threadIdx.x, f);
you should do this:
cuPrintf ("Thread number %d. f = %d\n", threadIdx.x, f);
Other than that, I believe your code is correct (it works for me).
This SO question/answer gives more tips about using cuPrintf properly.
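For reference, here is a minimal sketch of the corrected program (assuming the downloaded cuPrintf.cu is on the include path; this is not the asker's exact file layout):

#include "cuPrintf.cu"

__global__ void hello_kernel (float f) {
    // cuPrintf replaces printf; %f is the matching specifier for a float argument
    cuPrintf ("Thread number %d. f = %f\n", threadIdx.x, f);
}

int main () {
    cudaPrintfInit ();                 // allocate the device-side print buffer
    hello_kernel <<< 1, 16 >>> (1.2345f);
    cudaPrintfDisplay (stdout, true);  // copy the buffer back and print it
    cudaPrintfEnd ();
    return 0;
}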

Include <stdio.h> and compile with -arch=sm_20 (in-kernel printf requires a device of compute capability 2.0 or higher).
Details:
code:
#include <stdio.h>

__global__ void hello_kernel (float f) {
    printf ("Thread number %d. f = %d\n", threadIdx.x, f);
}

int main(){
    return 0;
}
compilation:
nvcc -arch=sm_20 -o printfTest printfTest.cu

Related

Can't I call a __host__ __device__ function from a __device__ function?

In the CUDA documentation I found that cudaDeviceGetAttribute is a __host__ __device__ function. So I thought I could call it in my __global__ function to get some attributes of my device. Sadly it seems to mean something different, because I get a compile error even if I put it into a __device__ function and call that from my __global__ function.
Is it possible to call cudaDeviceGetAttribute on my GPU? Or what else does __host__ __device__ mean?
Here is my source code:
__device__ void GetAttributes(int* unique)
{
    cudaDeviceAttr attr = cudaDevAttrMaxThreadsPerBlock;
    cudaDeviceGetAttribute(unique, attr, 0);
}

__global__ void ClockTest(int* a, int* b, long* return_time, int* unique)
{
    clock_t start = clock();
    //some complex calculations
    *a = *a + *b;
    *b = *a + *a;
    GetAttributes(unique);
    *a = *a + *b - *a;
    clock_t end = clock();
    *return_time = end - start;
}

int main()
{
    int a = 2;
    int b = 3;
    long time = 0;
    int uni;
    int* dev_a;
    int* dev_b;
    long* dev_time;
    int* unique;
    for (int i = 0; i < 10; ++i) {
        cudaMalloc(&dev_a, sizeof(int));
        cudaMalloc(&dev_b, sizeof(int));
        cudaMalloc(&dev_time, sizeof(long));
        cudaMalloc(&unique, sizeof(int));
        cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice);
        ClockTest <<<1,1>>>(dev_a, dev_b, dev_time, unique);
        cudaMemcpy(&a, dev_a, sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(&time, dev_time, sizeof(long), cudaMemcpyDeviceToHost);
        cudaMemcpy(&uni, unique, sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(&dev_a);
        cudaFree(&dev_b);
        cudaFree(&dev_time);
        cudaFree(&unique);
        printf("%d\n", time);
        printf("unique: %d\n", uni);
        cudaDeviceReset();
    }
    return 0;
}
EDIT: sorry, my previous answer was not correct. There does seem to be a problem in nvcc (see below).
cudaDeviceGetAttribute can work correctly in device code; here is a worked example on a K20X with CUDA 8.0.61:
$ cat t1305.cu
#include <stdio.h>
__global__ void tkernel(){
    int val;
    cudaError_t err = cudaDeviceGetAttribute(&val, cudaDevAttrMaxThreadsPerBlock, 0);
    printf("err = %d, %s\n", err, cudaGetErrorString(err));
    printf("val = %d\n", val);
}

int main(){
    tkernel<<<1,1>>>();
    cudaDeviceSynchronize();
}
$ nvcc -arch=sm_35 -o t1305 t1305.cu -rdc=true -lcudadevrt
$ cuda-memcheck ./t1305
========= CUDA-MEMCHECK
err = 0, no error
val = 1024
========= ERROR SUMMARY: 0 errors
$
There are various runtime API functions supported for use in device code.
For the supported runtime API functions, it's generally necessary to:
compile for a cc 3.5 or higher device
compile with relocatable device code
link against the cuda device runtime library
In addition, your code has another coding error: cudaFree takes the pointer itself, not the address of the pointer, as sketched below.
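Applied to the loop in your main, the frees would look like this (only this correction shown):

    cudaFree(dev_a);     // pass dev_a itself ...
    cudaFree(dev_b);     // ... not &dev_a, &dev_b, etc.
    cudaFree(dev_time);
    cudaFree(unique);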
Caveats for this particular function:
There appears to be a problem in the CUDA compiler: if this device runtime API call is used without any other runtime API call in the kernel code, the code generation will not happen correctly. The workaround at this time is to make sure your kernel contains at least one other CUDA runtime API call. In my example above I used cudaGetErrorString, but you could, for example, use cudaDeviceSynchronize() or anything else. I have filed an internal NVIDIA bug to report this issue.
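To illustrate that workaround, a kernel using cudaDeviceGetAttribute would also make at least one other device runtime API call (a sketch only; the kernel name and parameter are illustrative, and it must still be built with -rdc=true and linked against -lcudadevrt as above):

__global__ void query_kernel(int *out){
    cudaDeviceGetAttribute(out, cudaDevAttrMaxThreadsPerBlock, 0);
    // any additional device runtime API call avoids the code-generation issue,
    // e.g. cudaGetLastError() or cudaDeviceSynchronize()
    cudaGetLastError();
}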
There appears to be a documentation error in the list of device runtime API calls supported in the CDP section of the programming guide (link above). The function cudaGetDeviceProperty does not exist, but I believe it should refer to cudaDeviceGetAttribute. I have filed an internal NVIDIA bug for this documentation error.

OpenACC: calling cuda __device__ kernel from OpenACC parallel loop

If I have a simple test CUDA kernel in a hello.cu file such as:
extern "C" __device__ float radians( float f ){
return f*3.14159265;
}
And test OpenACC code in mainacc.c:
#include <stdio.h>
#include <stdlib.h>
#define N 10
#pragma acc routine seq
extern float radians( float );
int main() {
    int i;
    float *hptr, *dptr;
    hptr = (float *) calloc(N, sizeof(float));
    #pragma acc parallel loop copy(hptr[0:N])
    for(i=0; i<N; i++) {
        hptr[i] = radians(i*0.1f);
    }
    for( i=0; i< N; i++)
        printf("\n %dth value : %f", i, hptr[i]);
    return 0;
}
If I try to compile this code as below I get link time errors:
nvcc hello.cu -c
cc -hacc -hlist=a mainacc.c hello.o
nvlink error : Undefined reference to 'radians' in '/tmp/pe_20271//app_cubin_20271.omainacc_1.o__sec.cubin'
cuda_link: nvlink fatal error
I tried nvcc with the "--relocatable-device-code true" option etc., but no success. Loaded modules are:
craype-accel-nvidia35
cudatoolkit/6.5
PrgEnv-cray/5.2.40
Could you tell me the correct way to use a CUDA device kernel within OpenACC?
I've been able to make this sort of mixing work with PGI, but I've not yet been able to produce a sample that works with the Cray compiler. Here's a simple example that works for PGI.
This is the file containing the CUDA.
// saxpy_cuda_device.cu
extern "C"
__device__
float saxpy_dev(float a, float x, float y)
{
    return a * x + y;
}
This is the file containing OpenACC.
// openacc_cuda_device.cpp
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#pragma acc routine seq
extern "C" float saxpy_dev(float, float, float);
int main(int argc, char **argv)
{
    float *x, *y, tmp;
    int n = 1<<20, i;
    x = (float*)malloc(n*sizeof(float));
    y = (float*)malloc(n*sizeof(float));
    #pragma acc data create(x[0:n]) copyout(y[0:n])
    {
        #pragma acc kernels
        {
            for( i = 0; i < n; i++)
            {
                x[i] = 1.0f;
                y[i] = 0.0f;
            }
        }
        #pragma acc parallel loop
        for( i = 0; i < n; i++ )
        {
            y[i] = saxpy_dev(2.0, x[i], y[i]);
        }
    }
    fprintf(stdout, "y[0] = %f\n",y[0]);
    return 0;
}
Below is the compilation command.
$ make
nvcc -rdc true -c saxpy_cuda_device.cu
pgc++ -fast -acc -ta=nvidia:rdc,cuda7.0 -c openacc_cuda_device.cpp
pgc++ -o openacc_cuda_device -fast -acc -ta=nvidia:rdc,cuda7.0 saxpy_cuda_device.o openacc_cuda_device.o -Mcuda
You can use the -Wc command line option to add the generated ptx file to the CUDA link line. I've opened a bug to make sure we document how to do this.
nvcc hello.cu -ptx -arch=sm_35
cc -hacc -hlist=a mainacc.c -Wc,hello.ptx
One suggestion is to provide both a host and device version of the subroutine and then use the "bind" clause to indicate which version to call from a compute region. This will allow you to maintain portability with the host code.
For example:
% cat radians.cu
extern "C" __device__ float cuda_radians( float f ){
return f*3.14159265;
}
extern "C" float radians( float f ){
return f*3.14159265;
}
% cat test.c
#include <stdio.h>
#include <stdlib.h>
#define N 10
#pragma acc routine (radians) bind(cuda_radians) seq
extern float radians( float f);
int main() {
    int i;
    float *hptr, *dptr;
    hptr = (float *) calloc(N, sizeof(float));
    #pragma acc parallel loop copy(hptr[0:N])
    for(i=0; i<N; i++) {
        hptr[i] = radians(i*0.1f);
    }
    for( i=0; i< N; i++)
        printf("\n %dth value : %f", i, hptr[i]);
    return 0;
}
% nvcc -c radians.cu --relocatable-device-code true
% pgcc -acc -ta=tesla:cuda7.0 -Minfo=accel test.c radians.o -V15.7 -Mcuda
test.c:
main:
15, Generating copy(hptr[:10])
Accelerator kernel generated
Generating Tesla code
16, #pragma acc loop gang, vector(128) /* blockIdx.x threadIdx.x */
% a.out
0th value : 0.000000
1th value : 0.314159
2th value : 0.628319
3th value : 0.942478
4th value : 1.256637
5th value : 1.570796
6th value : 1.884956
7th value : 2.199115
8th value : 2.513274
9th value : 2.827434

memset in CUBLAS gemm is always launched in default stream

I noticed that each call to the cublasSgemm function from the host results in 3 kernel invocations: memset, scal_kernel, and the gemm kernel itself (e.g. sgemm_large). This happens even if I use constant alpha/beta allocated in device memory. While the overhead of memset and scal_kernel is relatively small, the problem is that memset is always launched in the default stream, which causes unnecessary synchronization.
The code:
__constant__ __device__ float alpha = 1;
__constant__ __device__ float beta = 1;

int main()
{
    // ... memory allocation skipped ...
    float* px = thrust::raw_pointer_cast(x.data());
    float* py = thrust::raw_pointer_cast(y.data());
    float* pmat = thrust::raw_pointer_cast(mat.data());
    for (int iter = 0; iter < 3; ++iter)
    {
        cbstatus = cublasSgemm(cbh, CUBLAS_OP_N, CUBLAS_OP_N, crow, ccol, cshared, &alpha, px, crow, py, cshared, &beta, pmat, crow);
        assert(0 == cbstatus);
    }
}
This is what I see in the profiler:
The question: is there a way to avoid memset or make it run in the stream assigned to CUBLAS handle?
One idea is to use dynamic parallelism and run a device-side version of the gemm function, but this will work only on CC 3.5 and higher.
There was a bug in CUBLAS 5.5 where cudaMemset was used instead of cudaMemsetAsync in the specialized path where k >> m,n.
It is fixed in CUBLAS 6.0 RC, which you can access if you are a registered developer.
Btw, I wonder why you use __constant__ __device__ for alpha,beta.
Are you using pointerMode = DEVICE?
If not, you could simply use alpha,beta on the host.
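For reference, here is a minimal sketch of the two pointer modes (host scalars versus device-resident scalars); the handle and matrix names are illustrative, not taken from the question:

// host pointer mode (the default): alpha/beta are read from host memory
float h_alpha = 1.0f, h_beta = 1.0f;
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
            &h_alpha, d_A, m, d_B, k, &h_beta, d_C, m);

// device pointer mode: alpha/beta are read from device memory
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
            d_alpha, d_A, m, d_B, k, d_beta, d_C, m);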
Try the code below. The code is written so that, apart from unavoidable memory allocations and copies, only the cublasSgemm calls remain. You will see that:
only one kernel is launched (gemm_kernel1x1_core);
the two calls to cublasSgemm run perfectly in two different streams.
In the picture, the Visual Profiler timeline is shown.
My system: GeForce 540M, Windows 7, CUDA 5.5.
#include <conio.h>
#include <stdio.h>
#include <assert.h>
#include <cublas_v2.h>
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) { getchar(); exit(code); }
    }
}
/**********************/
/* cuBLAS ERROR CHECK */
/**********************/
#ifndef cublasSafeCall
#define cublasSafeCall(err) __cublasSafeCall(err, __FILE__, __LINE__)
#endif
inline void __cublasSafeCall(cublasStatus_t err, const char *file, const int line)
{
    if( CUBLAS_STATUS_SUCCESS != err) {
        fprintf(stderr, "CUBLAS error in file '%s', line %d\n \nerror %d \nterminating!\n",__FILE__, __LINE__,err);
        getch(); cudaDeviceReset(); assert(0);
    }
}
/********/
/* MAIN */
/********/
int main()
{
    int N = 5;
    float *A1, *A2, *B1, *B2, *C1, *C2;
    float *d_A1, *d_A2, *d_B1, *d_B2, *d_C1, *d_C2;
    A1 = (float*)malloc(N*N*sizeof(float));
    B1 = (float*)malloc(N*N*sizeof(float));
    C1 = (float*)malloc(N*N*sizeof(float));
    A2 = (float*)malloc(N*N*sizeof(float));
    B2 = (float*)malloc(N*N*sizeof(float));
    C2 = (float*)malloc(N*N*sizeof(float));
    gpuErrchk(cudaMalloc((void**)&d_A1,N*N*sizeof(float)));
    gpuErrchk(cudaMalloc((void**)&d_B1,N*N*sizeof(float)));
    gpuErrchk(cudaMalloc((void**)&d_C1,N*N*sizeof(float)));
    gpuErrchk(cudaMalloc((void**)&d_A2,N*N*sizeof(float)));
    gpuErrchk(cudaMalloc((void**)&d_B2,N*N*sizeof(float)));
    gpuErrchk(cudaMalloc((void**)&d_C2,N*N*sizeof(float)));
    for (int i=0; i<N*N; i++) {
        A1[i] = ((float)rand()/(float)RAND_MAX);
        A2[i] = ((float)rand()/(float)RAND_MAX);
        B1[i] = ((float)rand()/(float)RAND_MAX);
        B2[i] = ((float)rand()/(float)RAND_MAX);
    }
    gpuErrchk(cudaMemcpy(d_A1, A1, N*N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_B1, B1, N*N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_A2, A2, N*N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_B2, B2, N*N*sizeof(float), cudaMemcpyHostToDevice));
    cublasHandle_t handle;
    cublasSafeCall(cublasCreate(&handle));
    cudaStream_t stream1, stream2;
    gpuErrchk(cudaStreamCreate(&stream1));
    gpuErrchk(cudaStreamCreate(&stream2));
    float alpha = 1.f;
    float beta = 1.f;
    cublasSafeCall(cublasSetStream(handle,stream1));
    cublasSafeCall(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_A1, N, d_B1, N, &beta, d_C1, N));
    cublasSafeCall(cublasSetStream(handle,stream2));
    cublasSafeCall(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_A2, N, d_B2, N, &beta, d_C2, N));
    gpuErrchk(cudaDeviceReset());
    return 0;
}

Is it possible to use thrust::device_ptr on a mapped array?

I am trying to use the thrust::copy_if function on mapped memory. However, I get a runtime error that I have not been able to track down, so before spending a lot of time on debugging I would like to confirm that it is actually allowed to pass a pointer to a mapped memory location to the thrust::device_ptr wrapper.
Here is an example of what I mean:
int size=1024;
int* v_locked;
int* v_device;
int* stencil_device;
device_ptr<int> v_wrapper;
device_ptr<int> v_wrapper_end;
device_ptr<int> stencil_wrapper;
cudaHostAlloc((void**)&v_locked, size*sizeof(int), cudaHostAllocMapped);
cudaHostGetDevicePointer(&v_device, &v_locked, 0);
cudaMalloc((void**)&stencil_device, size*sizeof(int));
/*
kernel assigning stencil_device elements ...
*/
v_wrapper = device_pointer_cast(v_device);
stencil_wrapper = device_pointer_cast(stencil_device);
v_wrapper_end = copy_if(make_counting_iterator<int>(0), make_counting_iterator<int>(size), stencil_wrapper, v_wrapper, _1 == 1);
Is this a correct usage of mapped memory with thrust library?
Thank you.
Yes, it is possible.
I believe there were several problems with your code.
You don't appear to be doing any proper cuda error checking. If you were, you would have detected that although your calls to cudaHostGetDevicePointer seem to compile correctly, they were not set up correctly.
As mentioned above, your calls to cudaHostGetDevicePointer() were not set up correctly. The second argument is passed as a single pointer (*), not a double pointer (**). Refer to the documentation. This call as written would throw a cuda runtime error which you can trap; the one-line correction is sketched after this list.
Prior to your cudaHostAlloc calls, you should use the cudaSetDeviceFlags(cudaDeviceMapHost); call to enable this feature.
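The correction for point 2 is simply to pass the host pointer itself rather than its address (a sketch of just that line):

    cudaHostGetDevicePointer(&v_device, v_locked, 0);   // v_locked, not &v_locked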
Here is a sample code which seems to work correctly for me, and has the above problems fixed:
$ cat t281.cu
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
template<typename T>
struct is_one : thrust::unary_function<T, bool>
{
    __host__ __device__
    bool operator()(const T &x)
    {
        return (x==1);
    }
};
int main(){
    int size=1024;
    int* v_locked;
    int* v_device;
    int* stencil_locked;
    int* stencil_device;
    cudaSetDeviceFlags(cudaDeviceMapHost);
    cudaCheckErrors("cudaSetDeviceFlags");
    cudaHostAlloc((void**)&v_locked, size*sizeof(int), cudaHostAllocMapped);
    cudaCheckErrors("cudaHostAlloc 1");
    cudaHostGetDevicePointer(&v_device, v_locked, 0);
    cudaCheckErrors("cudaHostGetDevicePointer 1");
    cudaHostAlloc((void**)&stencil_locked, size*sizeof(int), cudaHostAllocMapped);
    cudaCheckErrors("cudaHostAlloc 2");
    cudaHostGetDevicePointer(&stencil_device, stencil_locked, 0);
    cudaCheckErrors("cudaHostGetDevicePointer 2");
    for (int i = 0; i < size; i++){
        v_locked[i] = i;
        stencil_locked[i] = i%2;}
    thrust::device_ptr<int> v_wrapper = thrust::device_pointer_cast(v_device);
    thrust::device_ptr<int> stencil_wrapper = thrust::device_pointer_cast(stencil_device);
    thrust::device_ptr<int> v_wrapper_end = v_wrapper + size;
    thrust::device_vector<int> result(size);
    thrust::device_vector<int>::iterator result_end = copy_if(v_wrapper, v_wrapper_end, stencil_wrapper, result.begin(), is_one<int>());
    int result_size = result_end - result.begin();
    thrust::host_vector<int> h_result(result_size);
    thrust::copy_n(result.begin(), result_size, h_result.begin());
    thrust::copy_n(h_result.begin(), 10, std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;
    return 0;
}
$ nvcc -arch=sm_20 -o t281 t281.cu
$ ./t281
1 3 5 7 9 11 13 15 17 19
$
