I wrote a test program to test the following idea: (1) a CUDA stream copies data to the GPU; the copy is done in a pthread. (2) A second CUDA stream reads and processes the data. (3) The first stream copies the next piece of data only after the second stream has processed the previous one.
However, it does not work: it only copies the first piece of data and then waits there forever.
#include "cuda.h"
#include <iostream>
#include <pthread.h>
const int UNPROCESSED = 1;
const int PROCESSED = 2;
const int DONE = 3;
const int RUNNING= 0;
const int NUM_OF_DATA = 100;
const int NUM_OF_BLOCKS = 1;
const int THREADS_PER_BLOCK = 1;
//int data_states[NUM_OF_DATA];
cudaStream_t cuda_stream[2];
volatile int* process_state;
volatile int* d_process_state;
volatile int* d_copier_state;
int* d_data_state;
int* h_data_states;
cudaError_t cuda_status;
using namespace std;
void* copy_data(void* arg){
int i=0;
//cout << "in copy_data" << endl;
while(i < NUM_OF_DATA){
if (*process_state != UNPROCESSED){
cout << "Now copy data " << i << " with state = " << h_data_states[i] << endl;
*process_state = UNPROCESSED;
cuda_status = cudaMemcpyAsync(d_data_state, &h_data_states[i], sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
i++;
}
}
int copier_state = DONE;
cudaMemcpyAsync((void*) d_copier_state, &copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
}
__global__ void process_data(volatile int* data_state, volatile int* process_state, volatile int* copier_state){
int i = 0;
printf(" i = %d\n", i);
while(*copier_state != DONE){
printf(" i = %d, copier_state = %d, data_state = %d\n", i, *copier_state, *data_state);
if(*data_state == UNPROCESSED){
printf("now processing data %d\n", i);
i++;
// process data here, skipped
*process_state = PROCESSED;
*data_state = PROCESSED;
//__threadfence_system();
}
}
printf("process_data is done\n");
}
int main(int argc, char **argv){
int i;
cudaSetDeviceFlags(cudaDeviceMapHost);
cuda_status = cudaMallocHost((void**) &process_state, NUM_OF_BLOCKS*sizeof(int), cudaHostAllocMapped);
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
cudaHostGetDevicePointer((int**) &d_process_state, (int*) process_state, 0);
cuda_status = cudaMalloc((void**) &d_copier_state, NUM_OF_BLOCKS*sizeof(int));
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
cudaMemset((void*)d_copier_state, RUNNING, sizeof(int));
cuda_status = cudaMallocHost((void**) &h_data_states, NUM_OF_DATA*sizeof(int), 0);
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
for(i = 0; i < NUM_OF_DATA; i++){
h_data_states[i] = UNPROCESSED;
}
cudaStreamCreate(&cuda_stream[0]);
cudaStreamCreate(&cuda_stream[1]);
pthread_t thread;
int thread_state = pthread_create(&thread, NULL, &copy_data, h_data_states);
if(thread_state){
cout << "Error: unable to create thread (produce_instances), "<< thread_state << endl;
exit(-1);
}
//cout << "Starting kernel" << endl;
process_data<<<NUM_OF_BLOCKS, THREADS_PER_BLOCK, 0, cuda_stream[1]>>>(d_data_state, d_process_state, d_copier_state);
cudaDeviceSynchronize();
cudaFree(d_data_state);
cudaFree((void*) d_copier_state);
cudaFreeHost((void*) process_state);
return 0;
}
You never allocate d_data_state in any way. It is a NULL pointer throughout your program.
Therefore the usage here is invalid:
cuda_status = cudaMemcpyAsync(d_data_state, &h_data_states[i], sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
And when I run your program, I get the error printout from the next line of code.
Since your kernel also uses d_data_state (which is an invalid pointer) I get various invalid global read errors if I run your code with cuda-memcheck.
Since you have not allocated anything for d_data_state, your code cannot possibly work.
You had several other issues in your code as well. As just one example:
int copier_state = DONE;
cudaMemcpyAsync((void*) d_copier_state, &copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
In order for cudaMemcpyAsync to work as expected (i.e. be asynchronous, and overlap with other stream activity) the host memory must be a pinned memory area. int copier_state = DONE; does not create a pinned allocation, so copying from that breaks the asynchronous overlap of the cudaMemcpyAsync operation.
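For illustration, a minimal sketch of the pinned-staging pattern (this mirrors what the corrected code below does with h_copier_state):
int *h_copier_state;
cudaMallocHost((void**)&h_copier_state, sizeof(int));   // page-locked host memory, eligible for async overlap
*h_copier_state = DONE;
cudaMemcpyAsync((void*)d_copier_state, h_copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);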
Here is a version of your code that works correctly for me (now updated with some additional guards against race conditions):
#include <iostream>
#include <pthread.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int UNPROCESSED = 1;
const int PROCESSED = 2;
const int DONE = 3;
const int RUNNING= 0;
const int NUM_OF_DATA = 100;
const int NUM_OF_BLOCKS = 1;
const int THREADS_PER_BLOCK = 1;
//int data_states[NUM_OF_DATA];
cudaStream_t cuda_stream[2];
volatile int* process_state;
volatile int* d_process_state;
volatile int* d_copier_state;
int* d_data_state;
int* h_data_states;
int* h_copier_state;
cudaError_t cuda_status;
using namespace std;
void* copy_data(void* arg){
int i=0;
cudaSetDevice(0);
//cout << "in copy_data" << endl;
while(i < NUM_OF_DATA){
if (*process_state != UNPROCESSED){
// cout << "Now copy data " << i << " with state = " << h_data_states[i] << endl;
*process_state = UNPROCESSED;
cudaMemcpyAsync(d_data_state, &(h_data_states[i]), sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
cudaStreamSynchronize(cuda_stream[0]);
cudaCheckErrors("thread cudaMemcpyAsync fail");
//*process_state = UNPROCESSED;
i++;
}
}
*h_copier_state = DONE;
cudaMemcpyAsync((void *)d_copier_state, h_copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
cudaCheckErrors("thread cudaMemcpyAsync 2 fail");
// cout << "Thread finished" << endl;
return NULL;
}
__global__ void process_data(volatile int* data_state, volatile int* process_state, volatile int* copier_state){
int i = 0;
//printf(" i = %d\n", i);
while(*copier_state != DONE){
//printf(" i = %d, copier_state = %d, data_state = %d\n", i, *copier_state, *data_state);
if(*data_state == UNPROCESSED){
//printf("now processing data %d\n", i);
i++;
// process data here, skipped
*data_state = PROCESSED;
__threadfence_system();
*process_state = PROCESSED;
__threadfence_system();
}
}
// printf("process_data is done\n");
}
int main(int argc, char **argv){
int i;
cudaSetDevice(0);
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaMallocHost((void**) &process_state, NUM_OF_BLOCKS*sizeof(int), cudaHostAllocMapped);
cudaCheckErrors("cudaMallocHost 1 fail");
cudaHostGetDevicePointer((int**) &d_process_state, (int*) process_state, 0);
cudaMalloc((void**) &d_copier_state, sizeof(int));
cudaCheckErrors("cudaMalloc 1 fail");
cudaMemset((void*)d_copier_state, RUNNING, sizeof(int));
cudaMallocHost((void**) &h_copier_state, sizeof(int), 0);
cudaCheckErrors("cudaMallocHost 3 fail");
*h_copier_state = RUNNING;
cudaMallocHost((void**) &h_data_states, NUM_OF_DATA*sizeof(int), 0);
cudaCheckErrors("cudaMallocHost 2 fail");
for(i = 0; i < NUM_OF_DATA; i++){
h_data_states[i] = UNPROCESSED;
}
cudaMalloc((void**) &d_data_state, sizeof(int));
cudaCheckErrors("cudaMalloc 2 fail");
cudaMemcpy((void*)d_data_state, &(h_data_states[0]), sizeof(int), cudaMemcpyHostToDevice);
cudaStreamCreate(&cuda_stream[0]);
cudaStreamCreate(&cuda_stream[1]);
pthread_t thread;
int thread_state = pthread_create(&thread, NULL, &copy_data, NULL);
if(thread_state){
cout << "Error: unable to create thread (produce_instances), "<< thread_state << endl;
exit(-1);
}
//cout << "Starting kernel" << endl;
process_data<<<NUM_OF_BLOCKS, THREADS_PER_BLOCK, 0, cuda_stream[1]>>>(d_data_state, d_process_state, d_copier_state);
cudaDeviceSynchronize();
return 0;
}
As an aside, it's not necessary to have all the complexity of pthreads to run one extra thread. After the cuda kernel launch, all of your pthread-code could have been inserted in the main host thread, and your program would still work correctly. The host thread runs asynchronously to, and in parallel to, the device kernel, after a kernel launch.
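As a rough sketch (not a complete program), the same loop from copy_data could simply follow the kernel launch in main, reusing the variables already defined above:
process_data<<<NUM_OF_BLOCKS, THREADS_PER_BLOCK, 0, cuda_stream[1]>>>(d_data_state, d_process_state, d_copier_state);
// the kernel launch returns immediately, so the host thread can now do the copier's work itself
int j = 0;
while (j < NUM_OF_DATA){
    if (*process_state != UNPROCESSED){
        *process_state = UNPROCESSED;
        cudaMemcpyAsync(d_data_state, &(h_data_states[j]), sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
        cudaStreamSynchronize(cuda_stream[0]);
        j++;
    }
}
*h_copier_state = DONE;
cudaMemcpyAsync((void*)d_copier_state, h_copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
cudaDeviceSynchronize();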
I need to sum up a vector that is longer than the number of threads in a CUDA block, so I use multiple blocks to handle the task. I sum up a part of the vector within each block, after which I have two options: one is to use atomicAdd to combine the sums of the blocks, and the other is to write the partial results to global memory and launch another kernel to sum them up. Which method do you recommend?
Is the CUDA atomicAdd operation faster than launching another kernel when doing a sum reduction?
For the following test case, using the code lifted from slides 16 and 17 in the training here (video), the atomicAdd version seems to be a bit faster. The difference is about the cost of a kernel launch overhead, which makes sense:
$ cat t1834.cu
#include <time.h>
#include <sys/time.h>
#include <iostream>
const int BLOCK_SIZE = 1024;
template <typename T>
__global__ void reduce(const T * __restrict__ gdata, T * __restrict__ out, const int N){
__shared__ T sdata[BLOCK_SIZE];
int tid = threadIdx.x;
sdata[tid] = 0.0;
size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
while (idx < N) { // grid stride loop to load data
sdata[tid] += gdata[idx];
idx += gridDim.x*blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s) // parallel sweep reduction
sdata[tid] += sdata[tid + s];
}
if (tid == 0)
#ifndef USE_ATOMIC
out[blockIdx.x] = sdata[0];
#else
atomicAdd(out, sdata[0]);
#endif
}
#define USECPSEC 1000000ULL
unsigned long long dtime_usec(unsigned long long start){
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
typedef float mt;
const int ds = 1048576*8;
int main(){
mt *h_gdata, *d_gdata, *h_out, *d_out;
h_gdata = new mt[ds];
cudaMalloc(&d_gdata, ds*sizeof(mt));
const int nblocks = 160;
h_out = new mt[1];
cudaMalloc(&d_out, nblocks*sizeof(mt));
for (int i = 0; i < ds; i++) h_gdata[i] = 1;
cudaMemcpy(d_gdata, h_gdata, ds*sizeof(mt), cudaMemcpyHostToDevice);
reduce<<<nblocks, BLOCK_SIZE>>>(d_gdata, d_out, ds); // warm-up
cudaDeviceSynchronize();
cudaMemset(d_out, 0, sizeof(mt));
unsigned long long dt = dtime_usec(0);
reduce<<<nblocks, BLOCK_SIZE>>>(d_gdata, d_out, ds);
#ifndef USE_ATOMIC
reduce<<<1, BLOCK_SIZE>>>(d_out, d_out, nblocks);
#endif
cudaDeviceSynchronize();
dt = dtime_usec(dt);
cudaMemcpy(h_out, d_out, sizeof(mt), cudaMemcpyDeviceToHost);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; return 0;}
if (h_out[0] != ds) {std::cout << "Reduce Error: " << h_out[0] << std::endl; return 0;}
std::cout << "Timing: " << dt << "us" << std::endl;
return 0;
}
$ nvcc -lineinfo -arch=sm_70 -O3 -o t1834 t1834.cu -std=c++14 -Wno-deprecated-gpu-targets
$ ./t1834
Timing: 69us
$ nvcc -lineinfo -arch=sm_70 -O3 -o t1834 t1834.cu -std=c++14 -Wno-deprecated-gpu-targets -DUSE_ATOMIC
$ ./t1834
Timing: 66us
$
(CUDA 11.2, Centos 7, V100 GPU)
Thank you very much for reading my thread.
I am doing CUDA work, but keep getting a cudaDeviceSynchronize() error code 77: cudaErrorIllegalAddress, without any idea why. I searched for both the code and the function and, surprisingly, only a few records showed up. Very strange.
I basically sum up all the pixels of images. To give my question as much reference as possible, I am showing all my CUDA code here:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "thorcalgpu.h"
#include <stdio.h>
#include "math.h"
#include <vector>
#include <algorithm>
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
using namespace std;
float random_float(void)
{
return static_cast<float>(rand()) / RAND_MAX;
}
__global__ void reduceSum(unsigned short *input,
unsigned long long *per_block_results,
const int n)
{
extern __shared__ unsigned long long sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
unsigned short x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
// Helper function for using CUDA to add vectors in parallel.
//template <class T>
cudaError_t gpuWrapper(float *mean, int N, vector<string> filelist)
{
int size = N*N;
unsigned long long* dev_sum = 0;
unsigned short* dev_img = 0;
cudaError_t cudaStatus;
const int block_size = 512;
const int num_blocks = (size/block_size) + ((size%block_size) ? 1 : 0);
int L = filelist.size();
// Choose which GPU to run on, change this on a multi-GPU system.
double totalgpuinittime = 0;
StartCounter(7);
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_img, size * sizeof(unsigned short));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_sum, num_blocks*sizeof(unsigned long long));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
totalgpuinittime = GetCounter(7);
unsigned short* img;
unsigned short* pimg;
unsigned long long* sum = new unsigned long long[num_blocks];
unsigned long long* psum = sum;
cout<<endl;
cout << "gpu looping starts, and in progress ..." << endl;
StartCounter(6);
double totalfileiotime = 0;
double totalh2dcpytime = 0;
double totalkerneltime = 0;
double totald2hcpytime = 0;
double totalcpusumtime = 0;
double totalloopingtime = 0;
for (int k = 0; k < L; k++)
{
StartCounter(1);
img = (unsigned short*)LoadTIFF(filelist[k].c_str());
totalfileiotime += GetCounter(1);
psum = sum;
pimg = img;
float gpumean = 0;
memset(psum, 0, sizeof(unsigned long long)*num_blocks);
StartCounter(2);
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_img, pimg, size * sizeof(unsigned short), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_sum, psum, num_blocks*sizeof(unsigned long long), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
totalh2dcpytime += GetCounter(2);
StartCounter(3);
//reduceSum<<<num_blocks,block_size,num_blocks * sizeof(unsigned long long)>>>(dev_img, dev_sum, size);
//reduceSum<<<num_blocks,block_size,block_size * sizeof(unsigned short)>>>(dev_img, dev_sum, size);
reduceSum<<<num_blocks,block_size>>>(dev_img, dev_sum, size);
totalkerneltime += GetCounter(3);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "reduction Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
// !!!!!! following is where the code 77 error occurs!!!!!!!
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
StartCounter(4);
cudaStatus = cudaMemcpy(psum, dev_sum, num_blocks * sizeof(unsigned long long ), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
totald2hcpytime += GetCounter(4);
StartCounter(5);
for (int i = 0; i < num_blocks; i++)
{
gpumean += *psum;
psum++;
}
gpumean /= N*N;
totalcpusumtime += GetCounter(5);
delete img;
img = NULL;
cout<<gpumean<<endl;
}
int S = 1e+6;
int F = filelist.size();
float R = S/F;
totalloopingtime = GetCounter(6);
cout<<"gpu looping ends."<<endl<<endl;
cout<< "analysis:"<<endl;
cout<<"gpu initialization time: "<<totalgpuinittime<<" sec"<<endl<<endl;
cout<<"file I/O time: "<<endl;
cout<<" total "<<totalfileiotime<<" sec | average "<<totalfileiotime*R<<" usec/frame"<<endl<<endl;
cout<<"host-to-device copy time: "<<endl;
cout<<" total "<<totalh2dcpytime<<" sec | average "<<totalh2dcpytime*R<<" usec/frame"<<endl<<endl;
cout<<"pure gpu kerneling time: "<<endl;
cout<<" total "<<totalkerneltime<<" sec | average "<<totalkerneltime*R<<" usec/frame"<<endl<<endl;
cout<<"device-to-host copy time: "<<endl;
cout<<" total "<<totald2hcpytime<<" sec | average "<<totald2hcpytime*R<<" usec/frame"<<endl<<endl;
/*cout<<"cpu summing time: "<<endl;
cout<<" total: "<<totalcpusumtime<<" sec | average: "<<totalcpusumtime*R<<" usec/frame"<<endl<<endl;;*/
/*cout <<"gpu looping time: " << endl;
cout<<" total: "<<totalloopingtime<<" sec | average: "<<totalloopingtime*R<<" usec/frame"<<endl;*/
Error:
cudaFree(dev_sum);
cudaFree(dev_img);
delete sum;
sum = NULL;
return cudaStatus;
}
void kernel(float* &mean, int N, vector<string> filelist)
{
// wrapper and kernel
cudaError_t cudaStatus = gpuWrapper(mean, N, filelist);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "gpuWapper failed!");
}
// printf("mean is: %f\n", mean);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
StartCounter(8);
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaDeviceReset failed!");
}
cout<<"gpu reset time: "<<GetCounter(8)<<" sec"<<endl<<endl;
//return *mean;
}
I have allocated enough and equivalent memory space for both the host and the device. Any comments are appreciated.
While this may not be the only source of error in the code, you are not allocating any dynamic shared memory for the reduction kernel, leading to the illegal addressing error you see. The correct kernel launch should be something like
size_t shm_size = block_size * sizeof(unsigned long long);
reduceSum<<<num_blocks,block_size,shm_size>>>(dev_img, dev_sum, size);
This allocates the equivalent of one unsigned long long for each thread running in the reduction kernel, which (by my very cursory reading of your code) should make the shared memory array sdata the correct size for the kernel to run without out-of-bounds access to that array.
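As an aside, another common option (just a sketch, not the only possible fix) is to size the shared array statically to the block size, in which case no third launch parameter is needed at all:
const int BLOCK_SIZE = 512;   // must match the block_size used at launch
__global__ void reduceSum(unsigned short *input,
                          unsigned long long *per_block_results,
                          const int n)
{
    __shared__ unsigned long long sdata[BLOCK_SIZE];   // statically sized shared memory
    // ... body identical to the kernel above ...
}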
Having defined how to deal with errors:
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
Normally, if the results array d_results (of type double, of size N) can be allocated in GPU memory all at once, we can transfer the data from the device to the host like so:
double *d_results;
HANDLE_ERROR(cudaMalloc(&d_results,N*sizeof(double)));
//Launch our kernel to do some computations and store the results in d_results
.....
// and transfer our data from the device to the host
vector<double> results(N);
cudaMemcpy(results.data(),d_results,N*sizeof(double),cudaMemcpyDeviceToHost);
What if the second line fails because there is not enough memory to store all the results at once? How can I manage to do the computations and transfer the results to the host properly? Is it mandatory to do the computation in batches? I would rather avoid manual batching. What is the standard approach to this situation in CUDA?
Batching is the best way to go. You can automate most of the batching process if you do something like this:
#include <assert.h>
#include <iostream>
int main()
{
// Allocate 4 Gb array on host
const size_t N = 1 << 30;
int * data = new int[N];
// Allocate as much memory as will fit on GPU
size_t total_mem, free_mem;
cudaMemGetInfo(&free_mem, &total_mem);
const size_t MB = 1 << 20;
cudaError_t status;
int *buffer;
size_t buffer_size = free_mem;
for(; buffer_size > MB; buffer_size -= MB) {
status = cudaMalloc((void **)&buffer, buffer_size);
if (status == cudaSuccess)
break;
}
std::cout << "Allocated " << buffer_size << " bytes on GPU" << std::endl;
// Loop through host source data in batches
std::cout << N << " items require processing" << std::endl;
size_t batchN = buffer_size / sizeof(int);
size_t remainN = N;
int * dp = data;
std::cout << "Using batch size " << batchN << std::endl;
for(; remainN > 0; remainN -= batchN) {
batchN = (remainN < batchN) ? remainN : batchN;
size_t worksize = batchN * sizeof(int);
std::cout << "Processing batch of size " << batchN;
std::cout << "," << remainN << " items remaining" << std::endl;
cudaMemcpy(buffer, dp, worksize, cudaMemcpyHostToDevice);
cudaMemset(buffer, 0xff, worksize);
cudaMemcpy(dp, buffer, worksize, cudaMemcpyDeviceToHost);
dp += batchN;
}
for(size_t i = 0; i < N; i++) {
assert(data[i] == 0xffffffff);
}
cudaDeviceReset();
return 0;
}
Which is basically: allocate as much free memory as your device has, then iteratively process the input data on the GPU in buffer-sized chunks until everything is done.
In the above code I have used cudaMemset as a proxy for a real kernel, but it gives you an idea of what is required. If you want to get fancier, you could use two buffers and streams (with registered/pinned host memory) and copy asynchronously to get compute/copy overlap, which will improve overall performance in non-trivial cases.
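A minimal sketch of that double-buffered variant, reusing data, N and batchN from the code above and assuming the host array is pinned (e.g. registered with cudaHostRegister) so the copies can actually overlap:
cudaStream_t streams[2];
int *buf[2];
for (int s = 0; s < 2; s++){
    cudaStreamCreate(&streams[s]);
    cudaMalloc((void **)&buf[s], batchN * sizeof(int));
}
size_t offset = 0;
for (int s = 0; offset < N; s ^= 1, offset += batchN){
    size_t thisN = (N - offset < batchN) ? (N - offset) : batchN;   // clamp the last batch
    size_t worksize = thisN * sizeof(int);
    cudaMemcpyAsync(buf[s], data + offset, worksize, cudaMemcpyHostToDevice, streams[s]);
    cudaMemsetAsync(buf[s], 0xff, worksize, streams[s]);            // stand-in for a real kernel, as before
    cudaMemcpyAsync(data + offset, buf[s], worksize, cudaMemcpyDeviceToHost, streams[s]);
}
cudaDeviceSynchronize();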
I want to compute an FFT from double to std::complex with the cuFFT library. My code looks like this:
#include <complex>
#include <iostream>
#include <cufft.h>
#include <cuda_runtime_api.h>
typedef std::complex<double> Complex;
using namespace std;
int main(){
int n = 100;
double* in;
Complex* out;
in = (double*) malloc(sizeof(double) * n);
out = (Complex*) malloc(sizeof(Complex) * n/2+1);
for(int i=0; i<n; i++){
in[i] = 1;
}
cufftHandle plan;
plan = cufftPlan1d(&plan, n, CUFFT_D2Z, 1);
unsigned int mem_size = sizeof(double)*n;
cufftDoubleReal *d_in;
cufftDoubleComplex *d_out;
cudaMalloc((void **)&d_in, mem_size);
cudaMalloc((void **)&d_out, mem_size);
cudaMemcpy(d_in, in, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, out, mem_size, cudaMemcpyHostToDevice);
int succes = cufftExecD2Z(plan,(cufftDoubleReal *) d_in,(cufftDoubleComplex *) d_out);
cout << succes << endl;
cudaMemcpy(out, d_out, mem_size, cudaMemcpyDeviceToHost);
for(int i=0; i<n/2; i++){
cout << "out: " << i << " " << out[i].real() << " " << out[i].imag() << endl;
}
return 0;
}
but it seems to me this must be wrong, because I think the transformed values should be 1 0 0 0 0 ... (or, without the normalization, 100 0 0 0 0 ...), but I just get 0 0 0 0 0 ...
Furthermore, I would prefer it if cufftExecD2Z worked in place, which should be possible, but I haven't figured out how to do so correctly. Can anybody help?
Your code has a variety of errors. You should probably review cufft documentation as well as the sample codes.
You should do proper cuda error checking and proper cufft error checking on all API return values.
The return value of the cufftPlan1d function does not go into the plan:
plan = cufftPlan1d(&plan, n, CUFFT_D2Z, 1);
The function itself sets the plan (that is why you pass &plan to the function), then when you assign the return value into the plan, it ruins the plan set up by the function.
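In other words (a minimal sketch of the corrected usage, matching the fixed code below), keep the handle and the status code separate:
cufftHandle plan;
cufftResult res = cufftPlan1d(&plan, n, CUFFT_D2Z, 1);   // the call fills in 'plan'
if (res != CUFFT_SUCCESS) { /* handle the error; do not assign res into plan */ }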
You correctly identified that the output can be of size ((N/2)+1), but then you didn't allocate space for it properly either on the host side:
out = (Complex*) malloc(sizeof(Complex) * n/2+1);
or on the device side:
unsigned int mem_size = sizeof(double)*n;
...
cudaMalloc((void **)&d_out, mem_size);
The following code has some of the above problems fixed, enough to get your desired result (100, 0, 0, ...)
#include <complex>
#include <iostream>
#include <cufft.h>
#include <cuda_runtime_api.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
typedef std::complex<double> Complex;
using namespace std;
int main(){
int n = 100;
double* in;
Complex* out;
#ifdef IN_PLACE
in = (double*) malloc(sizeof(Complex) * (n/2+1));
out = (Complex*)in;
#else
in = (double*) malloc(sizeof(double) * n);
out = (Complex*) malloc(sizeof(Complex) * (n/2+1));
#endif
for(int i=0; i<n; i++){
in[i] = 1;
}
cufftHandle plan;
cufftResult res = cufftPlan1d(&plan, n, CUFFT_D2Z, 1);
if (res != CUFFT_SUCCESS) {cout << "cufft plan error: " << res << endl; return 1;}
cufftDoubleReal *d_in;
cufftDoubleComplex *d_out;
unsigned int out_mem_size = (n/2 + 1)*sizeof(cufftDoubleComplex);
#ifdef IN_PLACE
unsigned int in_mem_size = out_mem_size;
cudaMalloc((void **)&d_in, in_mem_size);
d_out = (cufftDoubleComplex *)d_in;
#else
unsigned int in_mem_size = sizeof(cufftDoubleReal)*n;
cudaMalloc((void **)&d_in, in_mem_size);
cudaMalloc((void **)&d_out, out_mem_size);
#endif
cudaCheckErrors("cuda malloc fail");
cudaMemcpy(d_in, in, in_mem_size, cudaMemcpyHostToDevice);
cudaCheckErrors("cuda memcpy H2D fail");
res = cufftExecD2Z(plan,d_in, d_out);
if (res != CUFFT_SUCCESS) {cout << "cufft exec error: " << res << endl; return 1;}
cudaMemcpy(out, d_out, out_mem_size, cudaMemcpyDeviceToHost);
cudaCheckErrors("cuda memcpy D2H fail");
for(int i=0; i<n/2; i++){
cout << "out: " << i << " " << out[i].real() << " " << out[i].imag() << endl;
}
return 0;
}
Review the documentation on what is necessary to do an in-place transform in the real to complex case. The above code can be recompiled with -DIN_PLACE to see the behavior for an in-place transform, and the necessary code changes.
I'm trying to bind a 2D array to a texture and do interpolation between the data. My problem is that when I bind my array to the texture, the values I access are total nonsense. Even when I try to access the first value (tex2D(tex, 0.0f, 0.0f)) it doesn't make sense. So I guess I'm binding it wrong or my memcpy is wrong. Any ideas where my mistake is?
Here is the code:
#include <stdio.h>
#include <iostream>
#include "cuda.h"
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "HelloWorld.h"
#include "linearInterpolation_kernel.cu"
#include "linearInterpolation_kernel2.cu"
#include "linearInterpolation_kernel3.cu"
using namespace std;
using std::cout;
const int blocksize = 16;
__global__
void hello(char *a, int *b) {
a[threadIdx.x] += b[threadIdx.x];
}
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
printf("%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
printf("%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
}
}
int main()
{
int N = 200;
float *A;
A = (float *) malloc(N*sizeof(float));
float *B;
B = (float *) malloc(N*sizeof(float));
float *result;
result = (float *) malloc(N*sizeof(float));
float angle = 0.5f;
for(int i = 0; i < N; i++){
A[i] = (float)rand();
B[i] = (float)rand();
}
cout << A[3] << endl;
cout << B[3] << endl;
ipLinearTexture(A,B,result,angle,N);
float result2;
result2 = (angle)*A[3] + (1-angle)*B[3];
printf(" A %f B %f Result %f\n", A[3], B[3], result[3]);
cout << result2 << endl;
return 1;
}
void ipLinearTexture(float *A, float* B, float* result, float angle, int N)
{
float cuTime;
const int N2 = N;
float *dev_result;
float **AB;
AB = (float **) malloc( N * sizeof(float *));
if(AB)
{
for(int i = 0; i < N; i++)
{
AB[i] = (float *) calloc( 2 , sizeof(float *));
}
}
for (int i = 0; i < N; i++)
{
AB[i][0] = A[i];
AB[i][1] = B[i];
}
cudaMalloc(&dev_result, N * sizeof(float));
unsigned int size = N * 2 * sizeof(float);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray* cu_array;
checkCudaErrors(cudaMallocArray( &cu_array, &channelDesc,N,2 ));
checkCudaErrors(cudaMemcpyToArray( cu_array, 0, 0, AB, size, cudaMemcpyHostToDevice));
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = false; // access with normalized texture coordinates
checkCudaErrors(cudaBindTextureToArray( tex, cu_array, channelDesc));
dim3 dimBlock(10, 1, 1);
dim3 dimGrid((int)ceil((double)N*2/dimBlock.x), 1, 1);
transformKernel3<<< dimGrid, dimBlock, 0 >>>( dev_result, N, 2, angle);
checkCudaErrors(cudaUnbindTexture(tex));
cudaMemcpy(result, dev_result, N * sizeof(float), cudaMemcpyKind::cudaMemcpyDeviceToHost);
result[0] = (float)cuTime;
cout << "==================================================" << endl;
for (int i = 0 ; i < N ;i++)
{
cout << result[i] << endl;
}
cout << "==================================================" << endl;
cudaFree(dev_result);
cudaFreeArray(cu_array);
}
Here is the code inside the kernel:
#ifndef _SIMPLETEXTURE_KERNEL3_H_
#define _SIMPLETEXTURE_KERNEL3_H_
// declare texture reference for 2D float texture
texture<float, 1> tex;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! #param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void
transformKernel3( float* g_odata, int width, int height, float theta)
{
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < width*height)
{
g_odata[id] = tex1D(tex, xid * 2 + 0.5f);
}
}
#endif // #ifndef _SIMPLETEXTURE_KERNEL_H_
Like the concept in OpenGL, you can think of a 2D texture as a rectangular field. The center point of each small rectangle holds one element of your array data. So tex2D(tex, 0.5f/width, 0.5f/height) will be exactly the first value of your array data (width and height are the width and height of the 2D array).
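If it helps, here is a minimal sketch of a 2D fetch using the same legacy texture-reference API as above, but with unnormalized coordinates (normalized = false, as in your host code), where the first element sits at (0.5f, 0.5f) instead; tex2 and fetch2D are illustrative names, not part of the original code:
texture<float, 2, cudaReadModeElementType> tex2;   // bound to a width x height cudaArray, with tex2.normalized = false

__global__ void fetch2D(float *g_odata, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height)
        // with unnormalized coordinates, texel centers sit at integer index + 0.5f
        g_odata[y * width + x] = tex2D(tex2, x + 0.5f, y + 0.5f);
}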