CUDA & RDC & Thrust in multiple shared objects results in SIGSEGV in registerEntryFunction

I'm trying to use relocatable device code in two shared libraries, both of which use CUDA Thrust. Everything runs fine if I stop using Thrust in kernel.cu, which is not an option.
Edit: the program also works if RDC is disabled. Not an option for me either.
It compiles fine but crashes with a segfault at runtime. gdb tells me this:
Program received signal SIGSEGV, Segmentation fault.
0x0000000000422cc8 in cudart::globalState::registerEntryFunction(void**, char const*, char*, char const*, int, uint3*, uint3*, dim3*, dim3*, int*) ()
(cuda-gdb) bt
#0 0x0000000000422cc8 in cudart::globalState::registerEntryFunction(void**, char const*, char*, char const*, int, uint3*, uint3*, dim3*, dim3*, int*) ()
#1 0x000000000040876c in __cudaRegisterFunction ()
#2 0x0000000000402b58 in __nv_cudaEntityRegisterCallback(void**) ()
#3 0x00007ffff75051a3 in __cudaRegisterLinkedBinary(__fatBinC_Wrapper_t const*, void (*)(void**), void*) ()
from /home/mindoms/rdctestmcsimple/libkernel.so
#4 0x00007ffff75050b1 in __cudaRegisterLinkedBinary_66_tmpxft_00007a5f_00000000_16_cuda_device_runtime_compute_52_cpp1_ii_8b1a5d37 () from /home/user/rdctestmcsimple/libkernel.so
#5 0x000000000045285d in __libc_csu_init ()
#6 0x00007ffff65ea50f in __libc_start_main () from /lib64/libc.so.6
Here is my stripped-down example (using CMake) that shows the error.
main.cpp:
#include "kernel.cuh"
#include "kernel2.cuh"
int main(){
    Kernel k;
    k.callKernel();

    Kernel2 k2;
    k2.callKernel2();
}
kernel.cuh:
#ifndef __KERNEL_CUH__
#define __KERNEL_CUH__
class Kernel{
public:
void callKernel();
};
#endif
kernel.cu:
#include "kernel.cuh"
#include <stdio.h>
#include <iostream>
#include <thrust/device_vector.h>
__global__
void thekernel(int *data){
    if (threadIdx.x == 0)
        printf("the kernel says hello\n");
    data[threadIdx.x] = threadIdx.x * 2;
}

void Kernel::callKernel(){
    thrust::device_vector<int> D2;
    D2.resize(11);
    int *raw_ptr = thrust::raw_pointer_cast(&D2[0]);

    printf("Kernel::callKernel called\n");
    thekernel <<< 1, 10 >>> (raw_ptr);
    cudaThreadSynchronize();

    cudaError_t code = cudaGetLastError();
    if (code != cudaSuccess) {
        std::cout << "Cuda error: " << cudaGetErrorString(code) << " after callKernel!" << std::endl;
    }

    for (int i = 0; i < D2.size(); i++)
        std::cout << "Kernel D[" << i << "]=" << D2[i] << std::endl;
}
kernel2.cuh:
#ifndef __KERNEL2_CUH__
#define __KERNEL2_CUH__
class Kernel2{
public:
void callKernel2();
};
#endif
kernel2.cu:
#include "kernel2.cuh"
#include <stdio.h>
#include <iostream>
#include <thrust/device_vector.h>
__global__
void thekernel2(int *data2){
    if (threadIdx.x == 0)
        printf("the kernel2 says hello\n");
    data2[threadIdx.x] = threadIdx.x * 2;
}

void Kernel2::callKernel2(){
    thrust::device_vector<int> D;
    D.resize(11);
    int *raw_ptr = thrust::raw_pointer_cast(&D[0]);

    printf("Kernel2::callKernel2 called\n");
    thekernel2 <<< 1, 10 >>> (raw_ptr);
    cudaThreadSynchronize();

    cudaError_t code = cudaGetLastError();
    if (code != cudaSuccess) {
        std::cout << "Cuda error: " << cudaGetErrorString(code) << " after callKernel2!" << std::endl;
    }

    for (int i = 0; i < D.size(); i++)
        std::cout << "Kernel2 D[" << i << "]=" << D[i] << std::endl;
}
The CMake file below was used originally, but I get the same problem when I compile "by hand":
nvcc -arch=sm_35 -Xcompiler -fPIC -dc kernel2.cu
nvcc -arch=sm_35 -shared -Xcompiler -fPIC kernel2.o -o libkernel2.so
nvcc -arch=sm_35 -Xcompiler -fPIC -dc kernel.cu
nvcc -arch=sm_35 -shared -Xcompiler -fPIC kernel.o -o libkernel.so
g++ -o main main.cpp libkernel.so libkernel2.so -L/opt/cuda/current/lib64
Adding -cudart shared to every nvcc call as suggested somewhere results in a different error:
warning: Cuda API error detected: cudaFuncGetAttributes returned (0x8)
terminate called after throwing an instance of 'thrust::system::system_error'
what(): function_attributes(): after cudaFuncGetAttributes: invalid device function
Program received signal SIGABRT, Aborted.
0x000000313c432625 in raise () from /lib64/libc.so.6
(cuda-gdb) bt
#0 0x000000313c432625 in raise () from /lib64/libc.so.6
#1 0x000000313c433e05 in abort () from /lib64/libc.so.6
#2 0x00000031430bea7d in __gnu_cxx::__verbose_terminate_handler() () from /usr/lib64/libstdc++.so.6
#3 0x00000031430bcbd6 in std::set_unexpected(void (*)()) () from /usr/lib64/libstdc++.so.6
#4 0x00000031430bcc03 in std::terminate() () from /usr/lib64/libstdc++.so.6
#5 0x00000031430bcc86 in __cxa_rethrow () from /usr/lib64/libstdc++.so.6
#6 0x00007ffff7d600eb in thrust::detail::vector_base<int, thrust::device_malloc_allocator<int> >::append(unsigned long) () from ./libkernel.so
#7 0x00007ffff7d5f740 in thrust::detail::vector_base<int, thrust::device_malloc_allocator<int> >::resize(unsigned long) () from ./libkernel.so
#8 0x00007ffff7d5b19a in Kernel::callKernel() () from ./libkernel.so
#9 0x00000000004006f8 in main ()
CMakeLists.txt: Please adjust to your environment
cmake_minimum_required(VERSION 2.6.2)
project(Cuda-project)
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/CMake/cuda" ${CMAKE_MODULE_PATH})
SET(CUDA_TOOLKIT_ROOT_DIR "/opt/cuda/current")
SET(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -gencode arch=compute_52,code=sm_52)
find_package(CUDA REQUIRED)
link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64)
set(CUDA_SEPARABLE_COMPILATION ON)
set(BUILD_SHARED_LIBS ON)
list(APPEND CUDA_NVCC_FLAGS -Xcompiler -fPIC)
CUDA_ADD_LIBRARY(kernel
kernel.cu
)
CUDA_ADD_LIBRARY(kernel2
kernel2.cu
)
cuda_add_executable(rdctest main.cpp)
TARGET_LINK_LIBRARIES(rdctest kernel kernel2 cudadevrt)
About my system:
Fedora 23
kernel: 4.4.2-301.fc23.x86_64
Nvidia Driver: 361.28
Nvidia Toolkit: 7.5.18
g++: g++ (GCC) 5.3.1 20151207 (Red Hat 5.3.1-2)
Reproduced on:
CentOS release 6.7 (Final)
Kernel: 2.6.32-573.8.1.el6.x86_64
Nvidia Driver: 352.55
Nvidia Toolkit: 7.5.18
g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16)
glibc 2.12
cmake to 3.5

Apparently, this has something to do with which CUDA runtime is used: shared or static.
I slightly modified your example: instead of building two shared libraries and linking them to the executable individually, I create two static libraries that are linked together into one shared library, and that one is linked to the executable.
Also, here is an updated CMake file that uses the new (>= 3.8) native CUDA language support.
cmake_minimum_required(VERSION 3.8)
project (CudaSharedThrust CXX CUDA)
string(APPEND CMAKE_CUDA_FLAGS " -gencode arch=compute_61,code=compute_61")
if(BUILD_SHARED_LIBS)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()
add_library(kernel STATIC kernel.cu)
set_target_properties(kernel PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
add_library(kernel2 STATIC kernel2.cu)
set_target_properties(kernel2 PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
add_library(allkernels empty.cu) # empty.cu is an empty file
set_target_properties(allkernels PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
target_link_libraries(allkernels kernel kernel2)
add_executable(rdctest main.cpp)
set_target_properties(rdctest PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
target_link_libraries(rdctest allkernels)
Building this without any CMake flags (static build), the build succeeds and the program works.
Building with -DBUILD_SHARED_LIBS=ON, the program compiles, but it crashes with the same error as yours.
Building with
cmake .. -DBUILD_SHARED_LIBS=ON -DCMAKE_CUDA_FLAGS:STRING="--cudart shared"
compiles, and actually makes it run! So for some reason, the shared CUDA runtime is required for this sort of thing.
Also note that the step from 2 SOs -> 2 static libs in 1 SO was necessary, because otherwise the program would crash with a thrust::system::system_error.
This, however, is expected, because NVCC actually ignores shared object files during device linking: http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#libraries
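For reference, here is a rough, untested command-line sketch of the same approach without CMake (the architecture, paths and library names are assumptions based on the question): compile both translation units as relocatable device code, do an explicit device link step, put everything into one shared library, and link the final pieces against the shared CUDA runtime (libcudart.so):

nvcc -arch=sm_52 -Xcompiler -fPIC -dc kernel.cu kernel2.cu
nvcc -arch=sm_52 -Xcompiler -fPIC -dlink kernel.o kernel2.o -o device_link.o
g++ -shared -fPIC kernel.o kernel2.o device_link.o -o liballkernels.so -L/opt/cuda/current/lib64 -lcudadevrt -lcudart
g++ -o main main.cpp -L. -lallkernels -L/opt/cuda/current/lib64 -lcudart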

Related

How to call a Thrust function in a stream from a kernel?

I want to make thrust::scatter asynchronous by calling it in a device kernel (I could also do it by calling it in another host thread). thrust::cuda::par.on(stream) is a host function that cannot be called from a device kernel. The following code was tried with CUDA 10.1 on the Turing architecture.
__global__ void async_scatter_kernel(float* first,
                                     float* last,
                                     int*   map,
                                     float* output)
{
    cudaStream_t stream;
    cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);

    thrust::scatter(thrust::cuda::par.on(stream), first, last, map, output);

    cudaDeviceSynchronize();
    cudaStreamDestroy(stream);
}
I know Thrust uses dynamic parallelism to launch its kernels when called from the device; however, I couldn't find a way to specify the stream.
The following code compiles cleanly for me on CUDA 10.1.243:
$ cat t1518.cu
#include <thrust/scatter.h>
#include <thrust/execution_policy.h>

__global__ void async_scatter_kernel(float* first,
                                     float* last,
                                     int*   map,
                                     float* output)
{
    cudaStream_t stream;
    cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);

    thrust::scatter(thrust::cuda::par.on(stream), first, last, map, output);

    cudaDeviceSynchronize();
    cudaStreamDestroy(stream);
}

int main(){
    float *first = NULL;
    float *last = NULL;
    float *output = NULL;
    int *map = NULL;
    async_scatter_kernel<<<1,1>>>(first, last, map, output);
    cudaDeviceSynchronize();
}
$ nvcc -arch=sm_35 -rdc=true t1518.cu -o t1518
$ nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2019 NVIDIA Corporation
Built on Sun_Jul_28_19:07:16_PDT_2019
Cuda compilation tools, release 10.1, V10.1.243
$
The -arch=sm_35 (or similar) and -rdc=true are necessary (but not in all cases sufficient) compile switches for any code that uses CUDA Dynamic Parallelism. If you omit, for example, the -rdc=true switch, you get an error similar to what you describe:
$ nvcc -arch=sm_35 t1518.cu -o t1518
t1518.cu(11): error: calling a __host__ function("thrust::cuda_cub::par_t::on const") from a __global__ function("async_scatter_kernel") is not allowed
t1518.cu(11): error: identifier "thrust::cuda_cub::par_t::on const" is undefined in device code
2 errors detected in the compilation of "/tmp/tmpxft_00003a80_00000000-8_t1518.cpp1.ii".
$
So, for the example you have shown here, your compilation error can be eliminated either by updating to the latest CUDA version or by specifying the proper command line, or both.

Cuda - nvcc - No kernel image is available for execution on the device. What is the problem?

I'm trying to use nvcc with the most simple example, but it doesn't work correctly. I'm compiling and executing the example from https://devblogs.nvidia.com/easy-introduction-cuda-c-and-c/, but my server can't execute the global function. I rewrote the code to get some error message, and I receive the following:
"no kernel image is available for execution on the device"
My GPU is a Quadro 6000 and the CUDA version is 9.0.
#include <stdio.h>
#include <cuda_runtime.h>
__global__ void saxpy(int n, float a, float *x, float *y)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    y[i] = 10.0; //a*x[i] + y[i];
}

int main(int argc, char *argv[])
{
    int N = 120;
    int nDevices;
    float *x, *y, *d_x, *d_y;

    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess)
        printf("%s\n", cudaGetErrorString(err));
    else
        printf("Number of devices %d\n", nDevices);

    x = (float*)malloc(N*sizeof(float));
    y = (float*)malloc(N*sizeof(float));
    cudaMalloc(&d_x, N*sizeof(float));
    cudaMalloc(&d_y, N*sizeof(float));

    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);

    // Perform SAXPY on 1M elements
    saxpy<<<1, 1>>>(N, 2.0f, d_x, d_y);
    cudaDeviceSynchronize();

    err = cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
    printf("%s\n", cudaGetErrorString(err));

    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess)
        printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
    if (errAsync != cudaSuccess)
        printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));

    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
}
Execution command
bash-4.1$ nvcc -o sapx simples_cuda.cu
bash-4.1$ ./sapx
Number of devices 1
no error
Sync kernel error: no kernel image is available for execution on the device
GPUs of compute capability less than 2.0 are only supported by CUDA toolkits of version 6.5 and older.
GPUs of compute capability less than 3.0 (but greater than or equal to 2.0) are only supported by CUDA toolkits of version 8.0 and older.
Your Quadro 6000 is a compute capability 2.0 GPU. This can be determined programmatically with the deviceQuery CUDA sample code, or via a Google search. It is not supported by CUDA 9.0.
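If you prefer to check the compute capability in code rather than with deviceQuery, a minimal sketch using the runtime API call cudaGetDeviceProperties could look like this:

#include <cstdio>
#include <cuda_runtime.h>

int main(){
    cudaDeviceProp prop;
    cudaError_t err = cudaGetDeviceProperties(&prop, 0);   // query device 0
    if (err != cudaSuccess) {
        printf("cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("%s: compute capability %d.%d\n", prop.name, prop.major, prop.minor);
    return 0;
}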
You should add the compute capability of your video card as a parameter to the nvcc compiler. In my case (Windows / Visual Studio 2017), I set this in the Code Generation field. So, as #einpoklum answered before, add the -gencode parameter like this: -gencode arch=compute_${COMPUTE_CAPABILITY},code=sm_${SM_CAPABILITY}, where ${COMPUTE_CAPABILITY} and ${SM_CAPABILITY} belong to the following pairs (you can add them all, as VS2017 does):
{COMPUTE_CAPABILITY},{SM_CAPABILITY}
compute_35,sm_35
compute_37,sm_37
compute_50,sm_50
compute_52,sm_52
compute_60,sm_60
compute_61,sm_61
compute_70,sm_70
compute_75,sm_75
compute_80,sm_80
D:\Program Files\nVidia\CUDA Samples\MySamples\IntroToCUDA_1\IntroToCUDA_1>"D:\Program Files\nVidia\GPU Computing Toolkit\CUDA\v11.0\bin\nvcc.exe" -gencode=arch=compute_35,code=\"sm_35,compute_35\" -gencode=arch=compute_37,code=\"sm_37,compute_37\" -gencode=arch=compute_50,code=\"sm_50,compute_50\" -gencode=arch=compute_52,code=\"sm_52,compute_52\" -gencode=arch=compute_60,code=\"sm_60,compute_60\" -gencode=arch=compute_61,code=\"sm_61,compute_61\" -gencode=arch=compute_70,code=\"sm_70,compute_70\" -gencode=arch=compute_75,code=\"sm_75,compute_75\" -gencode=arch=compute_80,code=\"sm_80,compute_80\" --use-local-env -ccbin "D:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Tools\MSVC\14.16.27023\bin\HostX86\x64" -x cu -I"D:\Program Files\nVidia\GPU Computing Toolkit\CUDA\v11.0\include" -I"D:\Program Files\nVidia\GPU Computing Toolkit\CUDA\v11.0\include" -G --keep-dir x64\Debug -maxrregcount=0 --machine 64 --compile -cudart static -g -D_DEBUG -D_CONSOLE -D_UNICODE -DUNICODE -Xcompiler "/EHsc /W3 /nologo /Od /Fdx64\Debug\vc141.pdb /FS /Zi /RTC1 /MDd " -o x64\Debug\IntroToCUDA_1.cu.obj "D:\Program Files\nVidia\CUDA Samples\MySamples\IntroToCUDA_1\IntroToCUDA_1\IntroToCUDA_1.cu"
You can check the compute capability of your video card with the deviceQuery example found in the CUDA Samples SDK.
Adding to #RobertCrovella's answer:
When compiling with nvcc, you should always set appropriate flags to generate binary kernel images for the microarchitecture / compute capability you intend to run on. For example: -gencode arch=compute_${COMPUTE_CAPABILITY},code=compute_${COMPUTE_CAPABILITY},
with, say COMPUTE_CAPABILITY=61.
Read nvcc --help for more information on these flags (although, to be honest, it's a bit of a murky subject).
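As a concrete example on Linux (assuming a newer GPU, say compute capability 6.1; this flag will not help the CC 2.0 Quadro 6000 on CUDA 9.0, which simply does not support it), the build command from the question would become something like:

nvcc -gencode arch=compute_61,code=sm_61 -o sapx simples_cuda.cu

For the Quadro 6000 itself, you would additionally have to drop back to a CUDA toolkit that still supports compute capability 2.0 (8.0 or earlier) and build with -gencode arch=compute_20,code=sm_20.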

Error generating .so file from .o files using JNI for CUDA in Eclipse (Ubuntu)

I am working on a JNI-based CUDA program, for which I have a Java class with the main function, i.e. jclass.java (containing the native function declaration jniEntry()), and jclass.h, which is generated by javah. I have the JNI bridge cEntry.c, which contains the native function implementation declared as
JNIEXPORT void JNICALL Java_jclass_jniEntry(JNIEnv* env, jobject thisObj)
The above function calls the CUDA host function jniEntry() declared in cudaprogram.h. The jniEntry() function then calls the device function contained in cudaprogram.cu.
I can't seem to generate the .so file from the generated .o files, i.e. cudaprogram.o from cudaprogram.cu and cEntry.o from cEntry.c (which is the bridge for JNI, i.e. jclass.java -> jclass.class (from javac) & jclass.h (from javah -jni)).
My makefile is:

INCLUDES  := -I$(CUDASDK_PATH)/inc -I$(CUDA_PATH)/include -I$(JDK_PATH)/include -I$(JDK_PATH)/include/linux -I.
LIBRARIES := -lrt -lm -lcudart -lcufft -lcublas -L$(CUDA_PATH)/lib64 -L.
JAVASRC_PATH := ../
NATIVESRC_PATH := ./
NVCC := /opt/cuda-6.5//bin/nvcc -ccbin g++

cujni1: cEntry.o cudaprog.o makefile jclass.h cudaprog.h
        g++ $(INCLUDES) $(LIBRARIES) -v -m64 -shared -fPIC -o libcujni1.so cEntry.o cudaprog.o

cEntry.o: cEntry.c jclass.h cudaprog.o
        gcc $(INCLUDES) -v -m64 -fPIC -o $@ cEntry.c -c

cudaprog.o: cudaprog.cu jclass.h cudaprog.h
        $(NVCC) $(INCLUDES) -v -m64 -o $@ -c cudaprog.cu

run: build
        $(EXEC) ./cujni1

jclass.h: jclass.class
        javah -jni -classpath $(JAVASRC_PATH) jclass

jclass.class:
        javac $(JAVASRC_PATH)/jclass.java
The files that are generated without errors are jclass.class, jclass.h, cudaprogram.o, and cEntry.o, but libcujni1.so is not generated; I get errors like:
/usr/bin/ld: cudaprog.o: relocation R_X86_64_32 against `.rodata' can not be used when making a shared object; recompile with -fPIC
cudaprog.o: error adding symbols: Bad value
collect2: error: ld returned 1 exit status
make: *** [cujni1] Error 1
As you can see, I am using nvcc to compile the .cu files, so I can't use the -fPIC option directly, because it returns the error "unknown option -fPIC".
For reference, if needed, I am attaching the other source files as well.
jclass.java:
public class jclass {
    static {
        System.loadLibrary("cujni1");
    }

    private native void jniEntry();

    public static void main(String[] args){
        System.out.print("1:Hello" + "JNI CUder\n");
        new jclass().jniEntry();
    }
}
cEntry.c:
#include <jni.h>
#include <stdio.h>
#include "jclass.h"
#include "cudaprog.h"

JNIEXPORT void JNICALL Java_jclass_jniEntry(JNIEnv* env, jobject thisObj)
{
    printf("2:cEntry.c-->Java_jclass_jniEntry!\n");
    jniEntry();
    return;
}
cudaprog.h:
#ifndef CUDAPROG_H_
#define CUDAPROG_H_
#ifdef __cplusplus
extern "C" {
#endif
void jniEntry();
#ifdef __cplusplus
}
#endif
#endif /* CUDAPROG_H_ */
cudaprogram.cu:
// includes, system
#include <string.h>
#include <math.h>
#include "jclass.h"
#include "cudaprog.h"
#include <stdio.h>
#include <iostream>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <ctime>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#ifdef __cplusplus
extern "C"
{
#endif
#define LO -100.0f
#define HI 100.0f
#define BlockSize 16
#define VECTORLENGTH 100
#define MATRIXLENGTH 4000
__global__ void
calDistanceMatrixCUDA(float *Out, float *In)
{
// Block index
// int bx = blockIdx.x;
// int by = blockIdx.y;
// Thread index
// int tx = threadIdx.x;
// int ty = threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < MATRIXLENGTH && j < MATRIXLENGTH)
{
float fDim = 0.0f;
float fDist = 0.0f;
float(&InM)[4000][100] = *reinterpret_cast<float(*)[4000][100]>(In);
float(&OutM)[4000][4000] = *reinterpret_cast<float(*)[4000][4000]>(Out);
for (int k = 0; k < VECTORLENGTH; k++){//not blockSize because numElements = 100 < 128
fDim = InM[i][k] - InM[j][k];
fDim *= fDim;
fDist += fDim;
}
fDist = sqrt(fDist);
OutM[i][j] = fDist;
}
}
#ifdef __cplusplus
}
#endif
#ifdef __cplusplus
extern "C"
{
#endif
void jniEntry()
{
clock_t time1, time2, time3, time4;
double tDiff1, tDiff2, tDiff3, tDiff4;
unsigned int numElements = VECTORLENGTH;//dims
unsigned int numVectors = MATRIXLENGTH;
dim3 dimsVector(VECTORLENGTH, 1, 1);
dim3 dimsVectorArray(MATRIXLENGTH, VECTORLENGTH, 1);
dim3 dimsDistMatrix(MATRIXLENGTH, MATRIXLENGTH, 1);
size_t sizeVector = VECTORLENGTH * sizeof(float);
size_t sizeVectorArray = sizeVector * MATRIXLENGTH;
size_t sizeMatrix = MATRIXLENGTH * MATRIXLENGTH * sizeof(float);
unsigned int nSizeVector = dimsVector.x * dimsVector.y;
unsigned int mem_SizeVector = sizeof(float) * nSizeVector;
unsigned int nSizeVectorArray = dimsVectorArray.x * dimsVectorArray.y;
unsigned int mem_SizeVectorArray = sizeof(float) * nSizeVectorArray;
unsigned int nSizeDistMatrix = dimsDistMatrix.x * dimsDistMatrix.y;
unsigned int mem_SizeDistMatrix = sizeof(float) * nSizeDistMatrix;
float *distMatrix = (float *)malloc(mem_SizeDistMatrix);///Destination
/////////////////////////////////////////
///initialize Vector
time1 = clock();
float *featureV100 = (float *)malloc(mem_SizeVectorArray);
for (int i = 0; i < nSizeVectorArray; ++i)
{
featureV100[i] = LO + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (HI - LO)));;
// printf("i:%d, == %5.2f\n", i, featureV100[i]);
}
time2 = clock();
///////////////////////////
float *d_featureV100, *d_DistMatrix;
cudaError_t error;
error = cudaMalloc((void **)&d_featureV100, mem_SizeVectorArray);
if (error != cudaSuccess)
{
printf("cudaMalloc d_featureV100 returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **)&d_DistMatrix, mem_SizeDistMatrix);
if (error != cudaSuccess)
{
printf("cudaMalloc d_DistMatrix returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_featureV100, featureV100, mem_SizeVectorArray, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_featureV100,featureV100) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
//////////////////////
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Setup execution parameters
// int threads = /*128*/512; //sufficient for vector of 100 elements
dim3 threads(512); //sufficient for vector of 100 elements
// dim3 grid(MATRIXLENGTH / threads, MATRIXLENGTH / threads);
dim3 grid(512);
calDistanceMatrixCUDA<<<grid, threads>>>(d_DistMatrix, d_featureV100);
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msec = msecTotal ;
printf(
"Performance= Time= %.3f msec, WorkgroupSize= %d,%d,%d threads/block & %d,%d,%d blocks/grid\n",
msec,
threads.x,threads.y,threads.z,
grid.x,grid.y,grid.z);
error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to launch calDistanceMatrixCUDA (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaMemcpy(distMatrix, d_DistMatrix, mem_SizeDistMatrix, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to copy d_DistMatrix from device to host distMatrix (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaFree(d_featureV100);
cudaFree(d_DistMatrix);
free(featureV100);
free(distMatrix);
error = cudaDeviceReset();
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("Done\n");
}
#ifdef __cplusplus
}
#endif
Needless to say, the above cudaprogram.cu runs correctly without errors when built as a standalone CUDA application, i.e. without JNI.
Please guide me regarding the correct options in the makefile, as I am a newbie at creating makefiles. Thanks.
Edit:
After the changes you mentioned below in the answer, the ldd command gives:
ldd libcujni1.so
linux-vdso.so.1 => (0x00007ffd919b6000)
libcudart.so.6.5 => /opt/cuda-6.5//lib64/libcudart.so.6.5 (0x00007f47bde41000)
libstdc++.so.6 => /usr/lib/x86_64-linux-gnu/libstdc++.so.6 (0x00007f47bdb3d000)
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f47bd778000)
libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f47bd574000)
libpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 (0x00007f47bd356000)
librt.so.1 => /lib/x86_64-linux-gnu/librt.so.1 (0x00007f47bd14e000)
libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f47bce48000)
/lib64/ld-linux-x86-64.so.2 (0x00007f47be297000)
libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007f47bcc32000)
and the make command line shows
make all
javac ..//jclass.java
javah -jni -classpath ../ jclass
/opt/cuda-6.5//bin/nvcc -ccbin g++ -I/home/faizan/workspace/common//inc -I/opt/cuda-6.5//include -I/usr/lib/jvm/jdk1.8.0_60//include -I/usr/lib/jvm/jdk1.8.0_60//include/linux -I. -Xcompiler -fPIC -m64 -o cudaprog.o -c *.cu # -v
cudaprog.cu(89): warning: variable "time1" was set but never used
cudaprog.cu(89): warning: variable "time2" was set but never used
cudaprog.cu(89): warning: variable "time3" was declared but never referenced
cudaprog.cu(89): warning: variable "time4" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff1" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff2" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff3" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff4" was declared but never referenced
cudaprog.cu(92): warning: variable "numElements" was declared but never referenced
cudaprog.cu(93): warning: variable "numVectors" was declared but never referenced
cudaprog.cu(100): warning: variable "sizeVectorArray" was declared but never referenced
cudaprog.cu(101): warning: variable "sizeMatrix" was declared but never referenced
cudaprog.cu(104): warning: variable "mem_SizeVector" was declared but never referenced
cudaprog.cu(89): warning: variable "time1" was set but never used
cudaprog.cu(89): warning: variable "time2" was set but never used
cudaprog.cu(89): warning: variable "time3" was declared but never referenced
cudaprog.cu(89): warning: variable "time4" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff1" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff2" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff3" was declared but never referenced
cudaprog.cu(90): warning: variable "tDiff4" was declared but never referenced
cudaprog.cu(92): warning: variable "numElements" was declared but never referenced
cudaprog.cu(93): warning: variable "numVectors" was declared but never referenced
cudaprog.cu(100): warning: variable "sizeVectorArray" was declared but never referenced
cudaprog.cu(101): warning: variable "sizeMatrix" was declared but never referenced
cudaprog.cu(104): warning: variable "mem_SizeVector" was declared but never referenced
g++ -I/home/faizan/workspace/common//inc -I/opt/cuda-6.5//include -I/usr/lib/jvm/jdk1.8.0_60//include -I/usr/lib/jvm/jdk1.8.0_60//include/linux -I. -shared -fPIC -m64 -o cEntry.o cEntry.c jclass.h cudaprog.h # -shared -fPIC -Xlinker -znoexecstack -Xlinker -shared -v -g
g++ -I/home/faizan/workspace/common//inc -I/opt/cuda-6.5//include -I/usr/lib/jvm/jdk1.8.0_60//include -I/usr/lib/jvm/jdk1.8.0_60//include/linux -I. -m64 -shared -fPIC -o libcujni1.so cEntry.o cudaprog.o -L/opt/cuda-6.5//lib64 -Wl,-rpath=/opt/cuda-6.5//lib64 -lcufft -lcublas -lcudart -lcuda -lrt -lm # -v
I also added the library folders, i.e.
LIBRARIES := -L$(CUDA_PATH)/lib64 -Wl,-rpath=$(CUDA_PATH)/lib64 -L$(CUDA_PATH)/lib64/stubs -Wl,-rpath=$(CUDA_PATH)/lib64/stubs -lcufft -lcublas -lcudart -lcuda -lrt -lm
The error currently is (after running the program via main() in jclass.java):
Exception in thread "main" 1:HelloJNI CUder
java.lang.UnsatisfiedLinkError: jclass.jniEntry()V
at jclass.jniEntry(Native Method)
at jclass.main(jclass.java:22)
Posting a proper answer since comments are not meant for that...
Your first problem was that you were missing -Xcompiler -fPIC in the nvcc compiler option list.
Your second issue is that your dynamic library is linked with neither libcudart nor libcuda. That is probably a problem with the Makefile, or with the order in which the libs are linked.
I would try something like -L$(CUDA_PATH)/lib64 -Wl,-rpath=$(CUDA_PATH)/lib64 -lcufft -lcublas -lcudart -lcuda -lrt -lm as link options...
Then check with ldd libcujni1.so that libcudart and libcuda are indeed listed there.
And please post a copy of your actual link command line (the one executed when you type make) and the result of ldd libcujni1.so in your initial question.
EDIT: I think I've got it... Just reread your Makefile and you should change this:
cujni1: cEntry.o cudaprog.o makefile jclass.h cudaprog.h
g++ $(INCLUDES) $(LIBRARIES) -v -m64 -shared -fPIC -o libcujni1.so cEntry.o cudaprog.o
into this:
cujni1: cEntry.o cudaprog.o makefile jclass.h cudaprog.h
g++ $(INCLUDES) -v -m64 -shared -fPIC -o libcujni1.so cEntry.o cudaprog.o $(LIBRARIES)
Notice the change of place for $(LIBRARIES)... Order matters (a lot) when it comes to linking.
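Putting both fixes together, the relevant Makefile rules would look roughly like this (an untested sketch; CUDA_PATH and the exact library list are assumptions based on the snippets above):

LIBRARIES := -L$(CUDA_PATH)/lib64 -Wl,-rpath=$(CUDA_PATH)/lib64 -lcufft -lcublas -lcudart -lcuda -lrt -lm

cudaprog.o: cudaprog.cu jclass.h cudaprog.h
        $(NVCC) $(INCLUDES) -m64 -Xcompiler -fPIC -o $@ -c cudaprog.cu   # position-independent code for the .so

cEntry.o: cEntry.c jclass.h
        gcc $(INCLUDES) -m64 -fPIC -o $@ -c cEntry.c

cujni1: cEntry.o cudaprog.o
        g++ -m64 -shared -fPIC -o libcujni1.so cEntry.o cudaprog.o $(LIBRARIES)   # libraries come last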

Call cublas in a kernel

I want to use Zgemv in parallel.
__global__ void S_Cphir(cuDoubleComplex *S, cuDoubleComplex *A, cuDoubleComplex *B, int n, int l)
{
    ....
    cublasZgemv(handle, CUBLAS_OP_N, n, n, &alpha, S + i*n*n, n, A + n*i, 1, &beta, B + i*n, 1);
}

void S_Cphir_(cuDoubleComplex *S, cuDoubleComplex *A, cuDoubleComplex *B, int n, int l){
    dim3 grid = dim3(1,1,1);
    dim3 block = dim3(32,1,1);
    S_Cphir<<<grid,block>>>(S,A,B,n,l);
}
My compile commands are:
nvcc -c -arch=compute_30 -code=sm_35 time_propagation_cublas.cu --relocatable-device-code true
nvcc -o ./main.v2 time_propagation_cublas.o -lcublas
The first command works, but the second one fails:
In function `__sti____cudaRegisterAll_58_tmpxft_000032b7_00000000_6_time_propagation_cublas_cpp1_ii_0d699356()': tmpxft_000032b7_00000000-3_time_propagation_cublas.cudafe1.cpp:(.text+0x17a4):
undefined reference to `__cudaRegisterLinkedBinary_58_tmpxft_000032b7_00000000_6_time_propagation_cublas_cpp1_ii_0d699356'
collect2: ld returned 1 exit status
I searched for "cudaRegisterLinkedBinary" but found nothing.
I know nvcc supports calling cublas in a kernel.
Use the CUBLAS Device Library sample code as your reference. On a standard CUDA 5.5 install, you'll find it at:
/usr/local/cuda/samples/7_CUDALibraries/simpleDevLibCUBLAS
Referring to the Makefile in that directory, your compile commands should be like this:
nvcc -arch=sm_35 -rdc=true -o main.v2 time_propagation_cublas.cu -lcublas -lcublas_device -lcudadevrt

CUDA with C/C++ Compilation fails

I am trying to integrate CUDA code with my existing C++ application. As instructed on some website, I need to have a "file.cu" in which I have a wrapper function that does the memory allocation on the GPU and launches the kernel. I followed that advice, but I am not able to compile the code now.
file.cu
#include <cuda.h>
#include <stdio.h>

void preComputeCorrelation_gpu( int * d )
{
    // I shall write the kernel later, once I have confirmed that the CUDA code works
    cudaDeviceProp prop;
    cudaGetDeviceProperties( &prop, 0 );
    printf( "name = %s\n", prop.name );
}
main.cpp
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#define __CUDA_SUPPORT__

#ifdef __CUDA_SUPPORT__
// Definition to be found in "cudaWrap.cu"
extern void preComputeCorrelation_gpu( int * d );
#endif

int main()
{
    // code to read d from the file and other initialization
    int * d;
    .
    .
#ifdef __CUDA_SUPPORT__
    fprintf( stderr, "GPU Computation starts" );
    // Definition to be found in "cudaWrap.cu"
    preComputeCorrelation_gpu( d );
#else
    fprintf( stderr, "CPU Computation starts" );
    preComputeCorrelation( d );
#endif
    .
    .
    // more code
    return 0;
}
Now, I use the following commands to compile the code:
$ nvcc -c cudaWrap.cu
$ g++ -I /usr/local/cuda-5.0/include -L /usr/local/cuda-5.0/lib -o GA_omp GA_dev_omp.cpp main_omp.cpp data_stats.cpp cudaWrap.o
Compilation fails, and I get the following message after the second command, although the first command works.
cudaWrap.o: In function `preComputeCorrelation_gpu(DataSet*)':
tmpxft_00001061_00000000-3_cudaWrap.cudafe1.cpp:(.text+0x2f): undefined reference to `cudaGetDeviceProperties'
cudaWrap.o: In function `__cudaUnregisterBinaryUtil()':
tmpxft_00001061_00000000-3_cudaWrap.cudafe1.cpp:(.text+0x6b): undefined reference to `__cudaUnregisterFatBinary'
cudaWrap.o: In function `__sti____cudaRegisterAll_43_tmpxft_00001061_00000000_6_cudaWrap_cpp1_ii_f8a043c5()':
tmpxft_00001061_00000000-3_cudaWrap.cudafe1.cpp:(.text+0x8c): undefined reference to `__cudaRegisterFatBinary'
collect2: ld returned 1 exit status
How do I sort this out?
The solution to this problem is that the ordinary C++ code and the CUDA code need to be linked together with libcudart.so.
Compilation should look something like:
$ nvcc -c cudaWrap.cu
$ g++ -lcudart -I /usr/local/cuda-5.0/include -L /usr/local/cuda-5.0/lib -o GA_omp GA_dev_omp.cpp main_omp.cpp data_stats.cpp cudaWrap.o
In this case, cudaWrap.cu contains the CUDA code, main_omp.cpp contains main(), and there are some other application files that need compiling too.