I have a kernel which calls another, empty kernel. However, when the calling kernel calls cudaDeviceSynchronize(), the kernel crashes and execution goes straight back to the host. The memory checker does not report any memory access issues.
Does anyone know what could be the reason for such uncivilized behavior?
The crash seems to happen only if I run the code from the debugger (Visual Studio -> Nsight -> Start CUDA Debugging).
The crash does not happen every time I run the code - sometimes it crashes, and sometimes it finishes ok.
Here is the complete code to reproduce the problem:
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#define CUDA_RUN(x_, err_) { cudaStatus = x_; \
    if (cudaStatus != cudaSuccess) { \
        fprintf(stderr, err_ " %d - %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); \
        int k; scanf("%d", &k); \
        goto Error; \
    } \
}
struct computationalStorage {
    float rotMat;
};

__global__ void drawThetaFromDistribution() {}

__global__ void chainKernel() {
    computationalStorage *c = (computationalStorage *)malloc(sizeof(computationalStorage));
    if (!c) {                // guard the dereference in case the device-side malloc fails
        printf("malloc error\n");
        return;
    }
    c->rotMat = 1.0f;
    int n = 1;
    while (n < 1000) {
        cudaError_t err;
        drawThetaFromDistribution<<<1, 1>>>();
        if ((err = cudaGetLastError()) != cudaSuccess)
            printf("drawThetaFromDistribution Sync kernel error: %s\n", cudaGetErrorString(err));
        printf("0");
        if ((err = cudaDeviceSynchronize()) != cudaSuccess)
            printf("drawThetaFromDistribution Async kernel error: %s\n", cudaGetErrorString(err));
        printf("1\n");
        ++n;
    }
    free(c);
}
int main() {
    cudaError_t cudaStatus;

    // Choose which GPU to run on; change this on a multi-GPU system.
    CUDA_RUN(cudaSetDevice(0), "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
    // Set on-chip memory to 16KB shared / 48KB L1
    CUDA_RUN(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1), "Can't set CUDA to use on chip memory for L1");
    // Set a large heap
    CUDA_RUN(cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024 * 10 * 192), "Can't set the Heap size");

    chainKernel<<<10, 192>>>();
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        printf("Something was wrong! Error code: %d", cudaStatus);
    }
    CUDA_RUN(cudaDeviceReset(), "cudaDeviceReset failed!");

Error:
    int k;
    scanf("%d", &k);
    return 0;
}
If all goes well I expect to see:
00000000000000000000000....0000000000000001
1
1
1
1
....
This is what I get when everything works ok. When it crashes however:
000000000000....0000000000000Something was wrong! Error code: 30
As you can see, the statement err = cudaDeviceSynchronize(); does not finish, and execution goes straight to the host, where its own cudaDeviceSynchronize() fails with an unknown error code (30 = cudaErrorUnknown).
System: CUDA 5.5, NVidia-Titan(Headless), Windows 7x64, Win32 application.
UPDATE: additional Nvidia card driving the display, Nsight 3.2.0.13289.
That last fact may have been the critical one. You don't mention which version of Nsight VSE you are using, nor your exact machine config (e.g. are there other GPUs in the machine, and if so, which is driving the display?), but at least until recently it was not possible to debug a dynamic parallelism application in single-GPU mode with Nsight VSE.
The current feature matrix also suggests that single-GPU CDP debugging is not yet supported.
One possible workaround in your case would be to add another GPU to drive the display and make the Titan card headless (i.e. don't attach any monitors and don't extend the Windows desktop onto that GPU).
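As a quick way to check your configuration (a sketch of my own, not part of the original answer), you can enumerate the GPUs and print the kernelExecTimeoutEnabled property, which is normally set on a GPU that is driving a display:
#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
    int count = 0;
    cudaGetDeviceCount(&count);
    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        // kernelExecTimeoutEnabled is typically 1 when a display
        // watchdog is active on the device, i.e. it drives a desktop.
        printf("Device %d: %s, display watchdog: %d\n",
               i, prop.name, prop.kernelExecTimeoutEnabled);
    }
    return 0;
}
A device reporting 0 here is a good candidate for the headless compute GPU.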
I ran your application with and without cuda-memcheck and it does not appear to me that there are any problems with it.
Related
I have been trying to see whether we can cudaMalloc the amount of free memory returned by cudaMemGetInfo. But I am encountering a strange problem: cudaMalloc seems to run before cudaMemGetInfo, as a result of which the latter reports the available memory as zero. How do I prevent the calls from being reordered?
Here is the code:
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define cudaMallocError(s) error = cudaGetLastError(); \
    if (error != cudaSuccess) \
    { \
        printf("CUDA Error: %s\n", cudaGetErrorString(error)); \
        printf("Failed to cudaMalloc %s\n", s); \
        exit(1); \
    }

int main()
{
    size_t f, t;
    int *x;
    cudaError_t error;

    cudaMemGetInfo(&f, &t);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("cudaMemGetInfo went wrong!\n");
        printf("Error: %s\n", cudaGetErrorString(error));
    }
    printf("Available memory = %zu\n", f);

    cudaDeviceSynchronize();
    cudaMalloc(&x, f);
    cudaMallocError("x");
    cudaFree(x);
    printf("Success\n");
    return 0;
}
It triggers both error-handling paths. This is the output:
cudaMemGetInfo went wrong!
Error: out of memory
Available memory = 0
CUDA Error: out of memory
Failed to cudaMalloc x
But if I remove the call to cudaMalloc altogether, it shows the available memory as some non-zero value, which seems to indicate that cudaMalloc is called before cudaMemGetInfo, even though the latter appears before the former in program order. Why is this so?
There is no reordering. cudaMalloc is executed after cudaMemGetInfo.
You are probably just observing physical memory allocation granularity. The requested bytes are rounded up to physical memory page size. However, if this results in more physical memory requested than available, the allocation fails.
On my machine, it seems to be sufficient to round the free byte count down to the nearest multiple of 2 megabytes.
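As an illustration of that rounding (a sketch of my own, not part of the original answer; the 2 MB granularity is an observation, not a documented guarantee):
#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
    size_t f = 0, t = 0;
    cudaMemGetInfo(&f, &t);

    // Round the reported free byte count down to a 2 MB boundary;
    // the real allocation granularity is driver and GPU dependent.
    const size_t granularity = 2 * 1024 * 1024;
    size_t request = (f / granularity) * granularity;

    char *x = NULL;
    cudaError_t err = cudaMalloc(&x, request);
    printf("Requested %zu of %zu free bytes: %s\n",
           request, f, cudaGetErrorString(err));
    if (err == cudaSuccess) cudaFree(x);
    return 0;
}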
What I am trying to do is modify a variable which resides in mapped memory, so that the main program exits.
But instead, the main program keeps spinning on the while (var == 0); line. I don't know how the new value can be flushed out so that it becomes visible on the host side too.
By the way, the variable is declared volatile everywhere, and I tried using the __threadfence_system() function with no success.
The host -> device direction works well.
System: Windows 7 x64, driver 358.50, GTX 560
Here is the piece of code that I can't get working:
#include <stdio.h>
#include <stdlib.h>

static void handleCUDAError(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

#define CUDA_ERROR_CHECK(err) (handleCUDAError(err, __FILE__, __LINE__))

__global__ void echoKernel(volatile int *semaphore)
{
    *semaphore = 1;
    __threadfence_system();
}

int main()
{
    CUDA_ERROR_CHECK(cudaSetDevice(0));
    CUDA_ERROR_CHECK(cudaSetDeviceFlags(cudaDeviceMapHost));
    volatile int var = 0;
    volatile int *devptr;
    CUDA_ERROR_CHECK(cudaHostRegister((int *)&var, sizeof(int), cudaHostRegisterMapped));
    CUDA_ERROR_CHECK(cudaHostGetDevicePointer(&devptr, (int *)&var, 0));
    echoKernel<<<1, 1>>>(devptr);
    while (var == 0);
    CUDA_ERROR_CHECK(cudaDeviceSynchronize());
    CUDA_ERROR_CHECK(cudaHostUnregister((int *)&var));
    CUDA_ERROR_CHECK(cudaDeviceReset());
    return 0;
}
When I run your code on Linux, it runs as-is without issue.
However, on Windows there is a problem with WDDM command batching. In effect, your kernel is not launched before you enter the while-loop, which then hangs.
The WDDM command queue is a queue of commands that will eventually go to the GPU device. Various events will cause this queue to be "flushed" and its contents to be delivered as a "batch" of commands to the GPU.
Various CUDA runtime API calls may effectively force a "flush" of the command queue, such as cudaDeviceSynchronize() or cudaMemcpy(). However, after the kernel launch you are not issuing any runtime API calls before entering your while-loop. As a result, in this scenario the kernel launch gets "stuck" in the queue and is never "flushed".
You can work around this in a variety of ways, for example by recording an event after the launch of the kernel and then querying the status of that event. This will have the effect of flushing the queue, which will launch the kernel.
Here's an example modification of your code that works for me:
#include <stdio.h>
#include <stdlib.h>

static void handleCUDAError(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

#define CUDA_ERROR_CHECK(err) (handleCUDAError(err, __FILE__, __LINE__))

__global__ void echoKernel(volatile int *semaphore)
{
    *semaphore = 1;
    __threadfence_system();
}

int main()
{
    CUDA_ERROR_CHECK(cudaSetDevice(0));
    CUDA_ERROR_CHECK(cudaSetDeviceFlags(cudaDeviceMapHost));
    volatile int var = 0;
    volatile int *devptr;
    CUDA_ERROR_CHECK(cudaHostRegister((int *)&var, sizeof(int), cudaHostRegisterMapped));
    CUDA_ERROR_CHECK(cudaHostGetDevicePointer(&devptr, (int *)&var, 0));
    cudaEvent_t my_event;
    CUDA_ERROR_CHECK(cudaEventCreate(&my_event));
    echoKernel<<<1, 1>>>(devptr);
    CUDA_ERROR_CHECK(cudaEventRecord(my_event));
    cudaEventQuery(my_event);    // flushes the WDDM queue; see note below
    while (var == 0);
    CUDA_ERROR_CHECK(cudaDeviceSynchronize());
    CUDA_ERROR_CHECK(cudaHostUnregister((int *)&var));
    CUDA_ERROR_CHECK(cudaDeviceReset());
    return 0;
}
Tested on CUDA 7.5, Driver 358.50, Win7 x64 release project, GTX460M.
Note that we don't wrap the cudaEventQuery call in the standard error checker, because the expected behavior here is for it to return a non-success status (cudaErrorNotReady) while the event has not yet completed.
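If you did want some checking there, a minimal sketch (my own addition, not from the original answer) would be a tolerant wrapper, dropped into the program above, that treats cudaErrorNotReady as expected:
// Hypothetical helper: accept cudaErrorNotReady from query-style
// calls (the event simply hasn't completed yet), fail on anything else.
static void handleQueryError(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess && err != cudaErrorNotReady) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define CUDA_QUERY_CHECK(err) (handleQueryError(err, __FILE__, __LINE__))
With that in place, the cudaEventQuery(my_event); line could become CUDA_QUERY_CHECK(cudaEventQuery(my_event));.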
I cannot allocate even 4 bytes of memory with cudaMallocHost() because of an 'out of memory' error. I tried cudaSetDevice(0), cudaDeviceSynchronize(), cudaThreadSynchronize(), and cudaFree(0) at the very start of my code for initialization, but they don't work.
I think this link (cudaMalloc always gives out of memory) has the answer I want, but I cannot understand it. How can I solve this problem?
Here is my full code.
/* test.cu */
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <assert.h>
inline cudaError_t checkCuda(cudaError_t result)
{
    if (result != cudaSuccess) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
    return result;
}
int main()
{
    cudaSetDevice(0);
    cudaDeviceSynchronize();
    cudaThreadSynchronize();
    cudaFree(0);
    int *test_ptr;
    checkCuda( cudaMallocHost((void **)&test_ptr, sizeof(int)) );
    cudaFreeHost(test_ptr);
    printf("Test Success.\n");
    return 0;
}
I compiled with this command:
nvcc test.cu -o test
and when I execute this:
me#me:~$ ./test
CUDA Runtime Error: out of memory
test: test.cu:10: cudaError_t checkCuda(cudaError_t): Assertion `result == cudaSuccess' failed.
Aborted
My CUDA version is 5.0; I'll post more specific device information if needed.
I just rebooted my system, and the error changed:
me#me:~$ nvidia-smi -q
NVIDIA: could not open the device file /dev/nvidiactl (No such file or directory).
NVIDIA-SMI has failed because it couldn't communicate with NVIDIA driver. Make sure that latest NVIDIA driver is installed and running.
Sorry for my carelessness; I'll close this thread since it has turned into a different problem. Thank you for your replies.
I am trying to profile some CUDA Rodinia benchmarks in terms of their SM and memory utilization, power consumption, etc. For that, I execute the benchmark and, simultaneously, a profiler which spawns a pthread to sample the GPU state using the NVML library.
The issue is that the execution time of a benchmark is much higher (about 3x) when I do not run the profiler alongside it than when I do. The CPU frequency scaling governor is set to userspace, so I do not think the CPU frequency is changing. Is it due to fluctuations in the GPU frequency?
Below is the code for the profiler.
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include "nvml.h"
#include "unistd.h"

#define NUM_THREADS 1

void *PrintHello(void *threadid)
{
    long tid;
    tid = (long)threadid;
    // printf("Hello World! It's me, thread #%ld!\n", tid);
    nvmlReturn_t result;
    nvmlDevice_t device;
    nvmlUtilization_t utilization;
    nvmlClockType_t jok;
    unsigned int device_count, i, powergpu, clo;
    char version[80];

    result = nvmlInit();
    result = nvmlSystemGetDriverVersion(version, 80);
    printf("\n Driver version: %s \n\n", version);
    result = nvmlDeviceGetCount(&device_count);
    printf("Found %d device%s\n\n", device_count,
           device_count != 1 ? "s" : "");
    printf("Listing devices:\n");
    result = nvmlDeviceGetHandleByIndex(0, &device);

    while (1)
    {
        result = nvmlDeviceGetPowerUsage(device, &powergpu);
        result = nvmlDeviceGetUtilizationRates(device, &utilization);
        printf("\n%d\n", powergpu);
        if (result == NVML_SUCCESS)
        {
            printf("%d\n", utilization.gpu);
            printf("%d\n", utilization.memory);
        }
        result = nvmlDeviceGetClockInfo(device, NVML_CLOCK_SM, &clo);
        if (result == NVML_SUCCESS)
        {
            printf("%d\n", clo);
        }
        usleep(500000);
    }
    pthread_exit(NULL);
}

int main(int argc, char *argv[])
{
    pthread_t threads[NUM_THREADS];
    int rc;
    long t;
    for (t = 0; t < NUM_THREADS; t++) {
        printf("In main: creating thread %ld\n", t);
        rc = pthread_create(&threads[t], NULL, PrintHello, (void *)t);
        if (rc) {
            printf("ERROR; return code from pthread_create() is %d\n", rc);
            exit(-1);
        }
    }
    /* Last thing that main() should do */
    pthread_exit(NULL);
}
With your profiler running, the GPU(s) are being pulled out of their sleep state (due to the accesses through the NVML API, which query data from the GPUs). This makes them respond much more quickly to a CUDA application, so the application appears to run "faster" if you time the entire application execution (e.g. using the Linux time command).
One solution is to place the GPUs in "persistence mode" with the nvidia-smi command (use nvidia-smi --help to get command line help).
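For example, a command along these lines (requires root privileges; exact flags may vary by driver version, so check nvidia-smi --help):
sudo nvidia-smi -pm 1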
Another solution would be to do the timing from within the application, and exclude the CUDA start-up time from the timing measurement, perhaps by executing a cuda command such as cudaFree(0); prior to the start of timing.
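A minimal sketch of that second approach (my own illustration, not from the original answer; work() is a placeholder for the benchmark's real kernels):
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>

__global__ void work() { }   // placeholder for the benchmark's real kernels

int main()
{
    cudaFree(0);   // force context creation so the start-up cost is paid here

    clock_t start = clock();             // timing begins after start-up
    work<<<1, 1>>>();
    cudaDeviceSynchronize();
    clock_t stop = clock();

    printf("Timed region: %f s\n", (double)(stop - start) / CLOCKS_PER_SEC);
    return 0;
}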
I have allocated page-aligned memory on host using posix_memalign. The call to posix_memalign does not return any error. However, using this pointer as argument to cudaHostRegister gives me an 'invalid argument' error. What could be the issue?
CUDA API version: 4.0
gcc version: 4.4.5
GPU compute capability: 2.0
The memory allocation is done in the application code, and a pointer is passed to a library routine.
Application code snippet:
if (posix_memalign((void **)&h_A, getpagesize(), n * n * sizeof(float))) {
    printf("Error allocating aligned memory for A\n");
    return 1;
}
Shared library code snippet:
if ((ret = cudaSetDeviceFlags(cudaDeviceMapHost)) != cudaSuccess) {
    fprintf(stderr, "Error setting device flag: %s\n",
            cudaGetErrorString(ret));
    return NULL;
}

if ((ret = cudaHostRegister(h_A, n2 * sizeof(float),
                            cudaHostRegisterMapped)) != cudaSuccess) {
    fprintf(stderr, "Error registering page-locked memory for A: %s\n",
            cudaGetErrorString(ret));
    return NULL;
}
I cannot reproduce this. If I take the code snippets you supplied and make them into a minimal executable:
#include <unistd.h>
#include <stdlib.h>
#include <malloc.h>
#include <stdio.h>

int main(void)
{
    const int n2 = 100 * 100;
    float *h_A;
    cudaError_t ret;

    if (posix_memalign((void **)&h_A, getpagesize(), n2 * sizeof(float))) {
        printf("Error allocating aligned memory for A\n");
        return -1;
    }

    if ((ret = cudaSetDeviceFlags(cudaDeviceMapHost)) != cudaSuccess) {
        fprintf(stderr, "Error setting device flag: %s\n",
                cudaGetErrorString(ret));
        return -1;
    }

    if ((ret = cudaHostRegister(h_A, n2 * sizeof(float),
                                cudaHostRegisterMapped)) != cudaSuccess) {
        fprintf(stderr, "Error registering page-locked memory for A: %s\n",
                cudaGetErrorString(ret));
        return -1;
    }

    return 0;
}
it compiles and runs without error under both CUDA 4.2 and CUDA 5.0 on a 64-bit Linux host with the 304.54 driver. I would therefore conclude that either you have a broken CUDA installation, or your code has a problem somewhere you haven't shown us.
Perhaps you can compile and run this code exactly as I posted and see what happens. If it works, it might help narrow down what it is that might be going wrong here.
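For reference, I built it with something like the following (the file name is arbitrary, and the -arch flag is my assumption based on your stated compute capability 2.0):
nvcc -arch=sm_20 hostregister.cu -o hostregister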