CUDA texture memory does not contain the right values

I'm trying to bind a 2D array to a texture and to interpolate between the data points. My problem is that when I bind my array to the texture, the values I access are total nonsense. Even when I try to access the first value (tex2D(tex, 0.0f, 0.0f)) it doesn't make sense. So I guess I'm binding it wrong or my memcpy is wrong. Any ideas where my mistake is?
Here is the code:
#include <stdio.h>
#include <iostream>
#include "cuda.h"
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "HelloWorld.h"
#include "linearInterpolation_kernel.cu"
#include "linearInterpolation_kernel2.cu"
#include "linearInterpolation_kernel3.cu"
using namespace std;
using std::cout;
const int blocksize = 16;
__global__
void hello(char *a, int *b) {
a[threadIdx.x] += b[threadIdx.x];
}
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
printf("%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
printf("%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
}
}
int main()
{
int N = 200;
float *A;
A = (float *) malloc(N*sizeof(float));
float *B;
B = (float *) malloc(N*sizeof(float));
float *result;
result = (float *) malloc(N*sizeof(float));
float angle = 0.5f;
for(int i = 0; i < N; i++){
A[i] = (float)rand();
B[i] = (float)rand();
}
cout << A[3] << endl;
cout << B[3] << endl;
ipLinearTexture(A,B,result,angle,N);
float result2;
result2 = (angle)*A[3] + (1-angle)*B[3];
printf(" A %f B %f Result %f\n", A[3], B[3], result[3]);
cout << result2 << endl;
return 1;
}
void ipLinearTexture(float *A, float* B, float* result, float angle, int N)
{
float cuTime;
const int N2 = N;
float *dev_result;
float **AB;
AB = (float **) malloc( N * sizeof(float *));
if(AB)
{
for(int i = 0; i < N; i++)
{
AB[i] = (float *) calloc( 2 , sizeof(float *));
}
}
for (int i = 0; i < N; i++)
{
AB[i][0] = A[i];
AB[i][1] = B[i];
}
cudaMalloc(&dev_result, N * sizeof(float));
unsigned int size = N * 2 * sizeof(float);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray* cu_array;
checkCudaErrors(cudaMallocArray( &cu_array, &channelDesc,N,2 ));
checkCudaErrors(cudaMemcpyToArray( cu_array, 0, 0, AB, size, cudaMemcpyHostToDevice));
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = false; // do not use normalized texture coordinates
checkCudaErrors(cudaBindTextureToArray( tex, cu_array, channelDesc));
dim3 dimBlock(10, 1, 1);
dim3 dimGrid((int)ceil((double)N*2/dimBlock.x), 1, 1);
transformKernel3<<< dimGrid, dimBlock, 0 >>>( dev_result, N, 2, angle);
checkCudaErrors(cudaUnbindTexture(tex));
cudaMemcpy(result, dev_result, N * sizeof(float), cudaMemcpyKind::cudaMemcpyDeviceToHost);
result[0] = (float)cuTime;
cout << "==================================================" << endl;
for (int i = 0 ; i < N ;i++)
{
cout << result[i] << endl;
}
cout << "==================================================" << endl;
cudaFree(dev_result);
cudaFreeArray(cu_array);
}
Here is the code inside the kernel file:
#ifndef _SIMPLETEXTURE_KERNEL3_H_
#define _SIMPLETEXTURE_KERNEL3_H_
// declare texture reference for 2D float texture
texture<float, 1> tex;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void
transformKernel3( float* g_odata, int width, int height, float theta)
{
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < width*height)
{
g_odata[id] = tex1D(tex, id * 2 + 0.5f);
}
}
#endif // #ifndef _SIMPLETEXTURE_KERNEL3_H_

Like the same concept in OpenGL, you can think of a 2D texture as a rectangular field. The center point of each small rectangle holds your array data. So, with normalized coordinates, tex2D(tex, 0.5f/width, 0.5f/height) will return exactly the first value of your array data (width and height being the width and height of the 2D array).
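To illustrate with unnormalized coordinates instead, here is a minimal sketch (assuming a 2D reference, texture<float, 2, cudaReadModeElementType> tex, with tex.normalized = false; the kernel name is made up): the center of texel (x, y) lies at (x + 0.5f, y + 0.5f), so a lookup exactly at a center returns the stored array value:
__global__ void fetchTexels(float *g_odata, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height)
        g_odata[y * width + x] = tex2D(tex, x + 0.5f, y + 0.5f); // texel center
}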


Is cuda atomicAdd operation faster than launch another kernel when we do reduce sum?

I need to sum up a vector which is longer than the number of threads in a CUDA block, so I use multiple blocks to handle the task. I sum up a part of the vector within each block, after which I have two options: one is to use atomicAdd to combine the sums of the blocks, and the other is to write the per-block results to global memory and launch another kernel to sum them up. Which method do you recommend?
For the following test case, using the code lifted from slides 16 and 17 in the training here (video), the atomic version seems to be a bit faster. The difference is about the cost of the extra kernel launch overhead, which makes sense:
$ cat t1834.cu
#include <time.h>
#include <sys/time.h>
#include <iostream>
const int BLOCK_SIZE = 1024;
template <typename T>
__global__ void reduce(const T * __restrict__ gdata, T * __restrict__ out, const int N){
__shared__ T sdata[BLOCK_SIZE];
int tid = threadIdx.x;
sdata[tid] = 0.0;
size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
while (idx < N) { // grid stride loop to load data
sdata[tid] += gdata[idx];
idx += gridDim.x*blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s) // parallel sweep reduction
sdata[tid] += sdata[tid + s];
}
if (tid == 0)
#ifndef USE_ATOMIC
out[blockIdx.x] = sdata[0];
#else
atomicAdd(out, sdata[0]);
#endif
}
#define USECPSEC 1000000ULL
unsigned long long dtime_usec(unsigned long long start){
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
typedef float mt;
const int ds = 1048576*8;
int main(){
mt *h_gdata, *d_gdata, *h_out, *d_out;
h_gdata = new mt[ds];
cudaMalloc(&d_gdata, ds*sizeof(mt));
const int nblocks = 160;
h_out = new mt[1];
cudaMalloc(&d_out, nblocks*sizeof(mt));
for (int i = 0; i < ds; i++) h_gdata[i] = 1;
cudaMemcpy(d_gdata, h_gdata, ds*sizeof(mt), cudaMemcpyHostToDevice);
reduce<<<nblocks, BLOCK_SIZE>>>(d_gdata, d_out, ds); // warm-up
cudaDeviceSynchronize();
cudaMemset(d_out, 0, sizeof(mt));
unsigned long long dt = dtime_usec(0);
reduce<<<nblocks, BLOCK_SIZE>>>(d_gdata, d_out, ds);
#ifndef USE_ATOMIC
reduce<<<1, BLOCK_SIZE>>>(d_out, d_out, nblocks);
#endif
cudaDeviceSynchronize();
dt = dtime_usec(dt);
cudaMemcpy(h_out, d_out, sizeof(mt), cudaMemcpyDeviceToHost);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; return 0;}
if (h_out[0] != ds) {std::cout << "Reduce Error: " << h_out[0] << std::endl; return 0;}
std::cout << "Timing: " << dt << "us" << std::endl;
return 0;
}
$ nvcc -lineinfo -arch=sm_70 -O3 -o t1834 t1834.cu -std=c++14 -Wno-deprecated-gpu-targets
$ ./t1834
Timing: 69us
$ nvcc -lineinfo -arch=sm_70 -O3 -o t1834 t1834.cu -std=c++14 -Wno-deprecated-gpu-targets -DUSE_ATOMIC
$ ./t1834
Timing: 66us
$
(CUDA 11.2, CentOS 7, V100 GPU)

Pthreads and CudaMemcpyAsync

I wrote a test program to test the following idea: (1) one CUDA stream copies data to the GPU, with the copy done in a pthread; (2) a second CUDA stream reads and processes the data; (3) the next piece of data is copied by the first stream only after the previous one has been processed by the second stream.
However, it does not work: it only copies the first piece of data and then waits there.
#include "cuda.h"
#include <iostream>
#include <pthread.h>
const int UNPROCESSED = 1;
const int PROCESSED = 2;
const int DONE = 3;
const int RUNNING= 0;
const int NUM_OF_DATA = 100;
const int NUM_OF_BLOCKS = 1;
const int THREADS_PER_BLOCK = 1;
//int data_states[NUM_OF_DATA];
cudaStream_t cuda_stream[2];
volatile int* process_state;
volatile int* d_process_state;
volatile int* d_copier_state;
int* d_data_state;
int* h_data_states;
cudaError_t cuda_status;
using namespace std;
void* copy_data(void* arg){
int i=0;
//cout << "in copy_data" << endl;
while(i < NUM_OF_DATA){
if (*process_state != UNPROCESSED){
cout << "Now copy data " << i << " with state = " << h_data_states[i] << endl;
*process_state = UNPROCESSED;
cuda_status = cudaMemcpyAsync(d_data_state, &h_data_states[i], sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
i++;
}
}
int copier_state = DONE;
cudaMemcpyAsync((void*) d_copier_state, &copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
}
__global__ void process_data(volatile int* data_state, volatile int* process_state, volatile int* copier_state){
int i = 0;
printf(" i = %d\n", i);
while(*copier_state != DONE){
printf(" i = %d, copier_state = %d, data_state = %d\n", i, *copier_state, *data_state);
if(*data_state == UNPROCESSED){
printf("now processing data %d\n", i);
i++;
// process data here, skipped
*process_state = PROCESSED;
*data_state = PROCESSED;
//__threadfence_system();
}
}
printf("process_data is done\n");
}
int main(int argc, char **argv){
int i;
cudaSetDeviceFlags(cudaDeviceMapHost);
cuda_status = cudaMallocHost((void**) &process_state, NUM_OF_BLOCKS*sizeof(int), cudaHostAllocMapped);
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
cudaHostGetDevicePointer((int**) &d_process_state, (int*) process_state, 0);
cuda_status = cudaMalloc((void**) &d_copier_state, NUM_OF_BLOCKS*sizeof(int));
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
cudaMemset((void*)d_copier_state, RUNNING, sizeof(int));
cuda_status = cudaMallocHost((void**) &h_data_states, NUM_OF_DATA*sizeof(int), 0);
if (cuda_status != cudaSuccess){
cout << "Error when allocating pinned host memory (full_instance_states)" << endl;
}
for(i = 0; i < NUM_OF_DATA; i++){
h_data_states[i] = UNPROCESSED;
}
cudaStreamCreate(&cuda_stream[0]);
cudaStreamCreate(&cuda_stream[1]);
pthread_t thread;
int thread_state = pthread_create(&thread, NULL, &copy_data, h_data_states);
if(thread_state){
cout << "Error: unable to create thread (produce_instances), "<< thread_state << endl;
exit(-1);
}
//cout << "Starting kernel" << endl;
process_data<<<NUM_OF_BLOCKS, THREADS_PER_BLOCK, 0, cuda_stream[1]>>>(d_data_state, d_process_state, d_copier_state);
cudaDeviceSynchronize();
cudaFree(d_data_state);
cudaFree((void*) d_copier_state);
cudaFreeHost((void*) process_state);
return 0;
}
You never allocate d_data_state in any way. It is a NULL pointer throughout your program.
Therefore the usage here is invalid:
cuda_status = cudaMemcpyAsync(d_data_state, &h_data_states[i], sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
And when I run your program, I get the error printout from the next line of code.
Since your kernel also uses d_data_state (which is an invalid pointer) I get various invalid global read errors if I run your code with cuda-memcheck.
Since you have not allocated anything for d_data_state, your code cannot possibly work.
You had several other issues in your code as well. As just one example:
int copier_state = DONE;
cudaMemcpyAsync((void*) d_copier_state, &copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
In order for cudaMemcpyAsync to work as expected (i.e., be asynchronous and overlap with other stream activity), the host memory must be a pinned memory area. int copier_state = DONE; does not create a pinned allocation, so copying from it breaks the asynchronous overlap of the cudaMemcpyAsync operation.
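A minimal sketch of the pinned pattern (this is what the fixed code below does with h_copier_state):
int *h_copier_state;
cudaMallocHost((void**)&h_copier_state, sizeof(int)); // pinned host allocation
*h_copier_state = DONE;
cudaMemcpyAsync((void*)d_copier_state, h_copier_state, sizeof(int),
                cudaMemcpyHostToDevice, cuda_stream[0]); // can now behave asynchronously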
Here is a version of your code that works correctly for me (now updated with some additional guards against race conditions):
#include <iostream>
#include <pthread.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int UNPROCESSED = 1;
const int PROCESSED = 2;
const int DONE = 3;
const int RUNNING= 0;
const int NUM_OF_DATA = 100;
const int NUM_OF_BLOCKS = 1;
const int THREADS_PER_BLOCK = 1;
//int data_states[NUM_OF_DATA];
cudaStream_t cuda_stream[2];
volatile int* process_state;
volatile int* d_process_state;
volatile int* d_copier_state;
int* d_data_state;
int* h_data_states;
int* h_copier_state;
cudaError_t cuda_status;
using namespace std;
void* copy_data(void* arg){
int i=0;
cudaSetDevice(0);
//cout << "in copy_data" << endl;
while(i < NUM_OF_DATA){
if (*process_state != UNPROCESSED){
// cout << "Now copy data " << i << " with state = " << h_data_states[i] << endl;
*process_state = UNPROCESSED;
cudaMemcpyAsync(d_data_state, &(h_data_states[i]), sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
cudaStreamSynchronize(cuda_stream[0]);
cudaCheckErrors("thread cudaMemcpyAsync fail");
//*process_state = UNPROCESSED;
i++;
}
}
*h_copier_state = DONE;
cudaMemcpyAsync((void *)d_copier_state, h_copier_state, sizeof(int), cudaMemcpyHostToDevice, cuda_stream[0]);
cudaCheckErrors("thread cudaMemcpyAsync 2 fail");
// cout << "Thread finished" << endl;
return NULL;
}
__global__ void process_data(volatile int* data_state, volatile int* process_state, volatile int* copier_state){
int i = 0;
//printf(" i = %d\n", i);
while(*copier_state != DONE){
//printf(" i = %d, copier_state = %d, data_state = %d\n", i, *copier_state, *data_state);
if(*data_state == UNPROCESSED){
//printf("now processing data %d\n", i);
i++;
// process data here, skipped
*data_state = PROCESSED;
__threadfence_system();
*process_state = PROCESSED;
__threadfence_system();
}
}
// printf("process_data is done\n");
}
int main(int argc, char **argv){
int i;
cudaSetDevice(0);
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaMallocHost((void**) &process_state, NUM_OF_BLOCKS*sizeof(int), cudaHostAllocMapped);
cudaCheckErrors("cudaMallocHost 1 fail");
cudaHostGetDevicePointer((int**) &d_process_state, (int*) process_state, 0);
cudaMalloc((void**) &d_copier_state, sizeof(int));
cudaCheckErrors("cudaMalloc 1 fail");
cudaMemset((void*)d_copier_state, RUNNING, sizeof(int));
cudaMallocHost((void**) &h_copier_state, sizeof(int), 0);
cudaCheckErrors("cudaMallocHost 3 fail");
*h_copier_state = RUNNING;
cudaMallocHost((void**) &h_data_states, NUM_OF_DATA*sizeof(int), 0);
cudaCheckErrors("cudaMallocHost 2 fail");
for(i = 0; i < NUM_OF_DATA; i++){
h_data_states[i] = UNPROCESSED;
}
cudaMalloc((void**) &d_data_state, sizeof(int));
cudaCheckErrors("cudaMalloc 2 fail");
cudaMemcpy((void*)d_data_state, &(h_data_states[0]), sizeof(int), cudaMemcpyHostToDevice);
cudaStreamCreate(&cuda_stream[0]);
cudaStreamCreate(&cuda_stream[1]);
pthread_t thread;
int thread_state = pthread_create(&thread, NULL, &copy_data, NULL);
if(thread_state){
cout << "Error: unable to create thread (produce_instances), "<< thread_state << endl;
exit(-1);
}
//cout << "Starting kernel" << endl;
process_data<<<NUM_OF_BLOCKS, THREADS_PER_BLOCK, 0, cuda_stream[1]>>>(d_data_state, d_process_state, d_copier_state);
cudaDeviceSynchronize();
return 0;
}
As an aside, it's not necessary to have all the complexity of pthreads to run one extra thread. After the cuda kernel launch, all of your pthread-code could have been inserted in the main host thread, and your program would still work correctly. The host thread runs asynchronously to, and in parallel to, the device kernel, after a kernel launch.
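For illustration, a sketch of that simplification, reusing copy_data and the globals from the fixed code above: launch the kernel first, then run the producer loop on the host thread, which executes concurrently with the kernel:
// the kernel launch is asynchronous, so the host thread is free
// to act as the producer while the kernel runs in cuda_stream[1]
process_data<<<NUM_OF_BLOCKS, THREADS_PER_BLOCK, 0, cuda_stream[1]>>>(d_data_state, d_process_state, d_copier_state);
copy_data(NULL);          // formerly the pthread body
cudaDeviceSynchronize();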

CuFFT Double to Complex

I want to compute an FFT from double to std::complex with the cuFFT library. My code looks like:
#include <complex>
#include <iostream>
#include <cufft.h>
#include <cuda_runtime_api.h>
typedef std::complex<double> Complex;
using namespace std;
int main(){
int n = 100;
double* in;
Complex* out;
in = (double*) malloc(sizeof(double) * n);
out = (Complex*) malloc(sizeof(Complex) * n/2+1);
for(int i=0; i<n; i++){
in[i] = 1;
}
cufftHandle plan;
plan = cufftPlan1d(&plan, n, CUFFT_D2Z, 1);
unsigned int mem_size = sizeof(double)*n;
cufftDoubleReal *d_in;
cufftDoubleComplex *d_out;
cudaMalloc((void **)&d_in, mem_size);
cudaMalloc((void **)&d_out, mem_size);
cudaMemcpy(d_in, in, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, out, mem_size, cudaMemcpyHostToDevice);
int succes = cufftExecD2Z(plan,(cufftDoubleReal *) d_in,(cufftDoubleComplex *) d_out);
cout << succes << endl;
cudaMemcpy(out, d_out, mem_size, cudaMemcpyDeviceToHost);
for(int i=0; i<n/2; i++){
cout << "out: " << i << " " << out[i].real() << " " << out[i].imag() << endl;
}
return 0;
}
but it seems to me this must be wrong, because I think the transformed values should be 1 0 0 0 0 ... (or, without the normalization, 100 0 0 0 0 ...), but I just get 0 0 0 0 0 ...
Furthermore, I would prefer it if cufftExecD2Z worked in place, which should be possible, but I haven't figured out how to do that correctly. Can anybody help?
Your code has a variety of errors. You should probably review the cuFFT documentation as well as the sample codes.
You should do proper CUDA error checking and proper cuFFT error checking on all API return values.
The return value of the cufftPlan1d function does not go into the plan:
plan = cufftPlan1d(&plan, n, CUFFT_D2Z, 1);
The function itself sets the plan (that is why you pass &plan to the function); when you then assign the return value into the plan, you ruin the plan the function just set up.
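A corrected call keeps the handle intact and checks the status separately (this is what the fixed code below does):
cufftHandle plan;
cufftResult res = cufftPlan1d(&plan, n, CUFFT_D2Z, 1); // plan is set through the pointer
if (res != CUFFT_SUCCESS) { /* report res and bail out */ }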
You correctly identified that the output can be of size ((N/2)+1), but then you didn't allocate space for it properly either on the host side:
out = (Complex*) malloc(sizeof(Complex) * n/2+1);
or on the device side:
unsigned int mem_size = sizeof(double)*n;
...
cudaMalloc((void **)&d_out, mem_size);
The following code has some of the above problems fixed, enough to get your desired result (100, 0, 0, ...)
#include <complex>
#include <iostream>
#include <cufft.h>
#include <cuda_runtime_api.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
typedef std::complex<double> Complex;
using namespace std;
int main(){
int n = 100;
double* in;
Complex* out;
#ifdef IN_PLACE
in = (double*) malloc(sizeof(Complex) * (n/2+1));
out = (Complex*)in;
#else
in = (double*) malloc(sizeof(double) * n);
out = (Complex*) malloc(sizeof(Complex) * (n/2+1));
#endif
for(int i=0; i<n; i++){
in[i] = 1;
}
cufftHandle plan;
cufftResult res = cufftPlan1d(&plan, n, CUFFT_D2Z, 1);
if (res != CUFFT_SUCCESS) {cout << "cufft plan error: " << res << endl; return 1;}
cufftDoubleReal *d_in;
cufftDoubleComplex *d_out;
unsigned int out_mem_size = (n/2 + 1)*sizeof(cufftDoubleComplex);
#ifdef IN_PLACE
unsigned int in_mem_size = out_mem_size;
cudaMalloc((void **)&d_in, in_mem_size);
d_out = (cufftDoubleComplex *)d_in;
#else
unsigned int in_mem_size = sizeof(cufftDoubleReal)*n;
cudaMalloc((void **)&d_in, in_mem_size);
cudaMalloc((void **)&d_out, out_mem_size);
#endif
cudaCheckErrors("cuda malloc fail");
cudaMemcpy(d_in, in, in_mem_size, cudaMemcpyHostToDevice);
cudaCheckErrors("cuda memcpy H2D fail");
res = cufftExecD2Z(plan,d_in, d_out);
if (res != CUFFT_SUCCESS) {cout << "cufft exec error: " << res << endl; return 1;}
cudaMemcpy(out, d_out, out_mem_size, cudaMemcpyDeviceToHost);
cudaCheckErrors("cuda memcpy D2H fail");
for(int i=0; i<n/2; i++){
cout << "out: " << i << " " << out[i].real() << " " << out[i].imag() << endl;
}
return 0;
}
Review the documentation on what is necessary to do an in-place transform in the real to complex case. The above code can be recompiled with -DIN_PLACE to see the behavior for an in-place transform, and the necessary code changes.
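For example, assuming the file is saved as fft.cu, the two variants can be built and compared like this (linking against cuFFT):
$ nvcc -o fft fft.cu -lcufft
$ ./fft
$ nvcc -o fft fft.cu -lcufft -DIN_PLACE
$ ./fft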

I lost data after __syncthreads() in cuda

I am trying to find the maximum of an array. I took help from CUDA Maximum Reduction Algorithm Not Working and made some modifications of my own. However, I am running it for 16 data elements, and I find that in the kernel code the shared memory holds only the first 4 elements; the rest are lost. I put in two cuPrintf calls: the first shows the data is there in shared memory, but the second, placed just after __syncthreads(), shows 0 from thread IDs 4 onwards. Please help.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cuPrintf.cu"
#include "cuPrintf.cuh"
__device__ float MaxOf2(float a, float b)
{
if(a > b) return a;
else return b;
}
__global__ void findMax(int size,float *array_device , float *outPut)
{
extern __shared__ float sdata[];
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i< size)
{
sdata[tid] = array_device[i];
cuPrintf(" array_d[%d]===%f, sdata[%d]===%f\n ",i,array_device[i],tid,sdata[tid]);
__threadfence();
}
__syncthreads();
if(tid<size)
cuPrintf(" array_d[%d]===%f, sdata[%d]===%f\n ",i,array_device[i],tid,sdata[tid]);
for ( int s=blockDim.x/2; s>0; s=s>>1)//s=blockDim.x/2
{
if (tid < s)
{
sdata[tid]= MaxOf2(sdata[tid],sdata[tid+s]);
}
__syncthreads();
}
if (tid == 0) outPut[blockIdx.x] = sdata[0];
}
int main()
{
long double M = pow(2,20);
long double N = 2;
int noThreadsPerBlock = 512 ;
printf("\n Provide the array Size N.(array will be of size N * 2^20 ) :-");
scanf("%Lf",&N);
long int size = 16;
int numOfBlock = (int)size /noThreadsPerBlock + 1;
printf("\n num of blocks==%ld",numOfBlock);
float *array_device , *outPut;
float array_host[]={221,100,2,340,47,36,500,1,33,4460,5,6,7,8,9,11};
cudaMalloc((void **)&array_device, size*sizeof(float));
cudaMalloc((void **)&outPut, size*sizeof(float));
cudaError_t error0 = cudaGetLastError();
printf("\n 0CUDA error: %s\n", cudaGetErrorString(error0));
printf("size===%ld",size);
cudaMemcpy(array_device, array_host, size*sizeof(float), cudaMemcpyHostToDevice);
cudaError_t error1 = cudaGetLastError();
printf("\n1CUDA error: %s\n", cudaGetErrorString(error1));
while(size>1 )
{
cudaPrintfInit();
findMax<<< numOfBlock,noThreadsPerBlock>>>(size,array_device, outPut);cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
cudaError_t error2 = cudaGetLastError();
printf(" 2CUDA error: %s\n", cudaGetErrorString(error2));
cudaMemcpy(array_device, outPut, size*sizeof(float), cudaMemcpyDeviceToDevice);
size = numOfBlock;
printf("\n ****size==%ld\n",size);
numOfBlock = (int)size /noThreadsPerBlock + 1;
}
cudaMemcpy(array_host, outPut, size*sizeof(float), cudaMemcpyDeviceToHost);
cudaError_t error3 = cudaGetLastError();
printf("\n3CUDA error: %s\n", cudaGetErrorString(error3));
for(int i=0;i<size;i++)
printf("\n index==%d ;data=%f ",i,array_host[i]);
return 0;
}
I'm posting my comment as an answer as requested.
Firstly, you haven't specified the dynamic shared memory size in the kernel launch. It should look something like:
findMax<<< numOfBlock,noThreadsPerBlock,sizeof(float)*noThreadsPerBlock>>>
Secondly, what was the idea behind the condition if(tid<size) on the second cuPrintf? Providing the output of the program would also help.
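For reference, a minimal self-contained sketch of the pattern (kernel and variable names here are made up): an extern __shared__ array receives its size, in bytes, from the third launch-configuration argument, so the two must be kept in sync:
__global__ void copyViaShared(const float *in, float *out, int n)
{
    extern __shared__ float sdata[];                 // sized at launch time
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        sdata[threadIdx.x] = in[i];
        out[i] = sdata[threadIdx.x];
    }
}
// one float of dynamic shared memory per thread in the block:
// copyViaShared<<<blocks, threads, threads * sizeof(float)>>>(d_in, d_out, n);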

Replicate a vector multiple times using CUDA Thrust

I am trying to solve a problem using CUDA Thrust.
I have a host array with 3 elements. Is it possible, using Thrust, to create a device array of 384 elements in which the 3 elements of my host array are repeated 128 times (128 x 3 = 384)?
Generally speaking, starting from an array of 3 elements, how can I use Thrust to generate a device array of size X, where X = Y x 3, i.e., Y is the number of repetitions?
One possible approach:
1. create a device vector of appropriate size
2. create 3 strided ranges, one for each of the element positions {1, 2, 3} in the final output (device) vector
3. use thrust::fill to fill each of the 3 strided ranges with the appropriate (host vector) element {1, 2, 3}
This code is a trivial modification of the strided range example to demonstrate. You can change the REPS define to 128 to see the full expansion to 384 output elements:
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
// for printing
#include <thrust/copy.h>
#include <iostream>
#include <iterator>
#define STRIDE 3
#define REPS 15 // change to 128 if you like
#define DSIZE (STRIDE*REPS)
// this example illustrates how to make strided access to a range of values
// examples:
// strided_range([0, 1, 2, 3, 4, 5, 6], 1) -> [0, 1, 2, 3, 4, 5, 6]
// strided_range([0, 1, 2, 3, 4, 5, 6], 2) -> [0, 2, 4, 6]
// strided_range([0, 1, 2, 3, 4, 5, 6], 3) -> [0, 3, 6]
// ...
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
int main(void)
{
thrust::host_vector<int> h_data(STRIDE);
h_data[0] = 1;
h_data[1] = 2;
h_data[2] = 3;
thrust::device_vector<int> data(DSIZE);
typedef thrust::device_vector<int>::iterator Iterator;
strided_range<Iterator> pos1(data.begin(), data.end(), STRIDE);
strided_range<Iterator> pos2(data.begin()+1, data.end(), STRIDE);
strided_range<Iterator> pos3(data.begin()+2, data.end(), STRIDE);
thrust::fill(pos1.begin(), pos1.end(), h_data[0]);
thrust::fill(pos2.begin(), pos2.end(), h_data[1]);
thrust::fill(pos3.begin(), pos3.end(), h_data[2]);
// print the generated data
std::cout << "data: ";
thrust::copy(data.begin(), data.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl;
return 0;
}
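Compiled as posted (REPS set to 15), this should print the three host values cycled 15 times:
data: 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3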
Robert Crovella has already answered this question using strided ranges. He has also pointed out the possibility of using the expand operator.
Below, I'm providing a worked example using the expand operator. As opposed to strided ranges, it avoids the need for for loops.
#include <stdio.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/scatter.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
using namespace thrust::placeholders;
/*************************************/
/* CONVERT LINEAR INDEX TO ROW INDEX */
/*************************************/
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T> {
T Ncols; // --- Number of columns
__host__ __device__ linear_index_to_row_index(T Ncols) : Ncols(Ncols) {}
__host__ __device__ T operator()(T i) { return i / Ncols; }
};
/*******************/
/* EXPAND OPERATOR */
/*******************/
template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
OutputIterator expand(InputIterator1 first1,
InputIterator1 last1,
InputIterator2 first2,
OutputIterator output)
{
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0), thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(), output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output; thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2, output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
/**************************/
/* STRIDED RANGE OPERATOR */
/**************************/
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
/********/
/* MAIN */
/********/
int main(){
/**************************/
/* SETTING UP THE PROBLEM */
/**************************/
const int Nrows = 10; // --- Number of objects
const int Ncols = 3; // --- Number of centroids
thrust::device_vector<int> d_sequence(Nrows * Ncols);
thrust::device_vector<int> d_counts(Ncols, Nrows);
thrust::sequence(d_sequence.begin(), d_sequence.begin() + Ncols);
expand(d_counts.begin(), d_counts.end(), d_sequence.begin(),
thrust::make_permutation_iterator(
d_sequence.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)));
printf("\n\nCentroid indices\n");
for(int i = 0; i < Nrows; i++) {
std::cout << " [ ";
for(int j = 0; j < Ncols; j++)
std::cout << d_sequence[i * Ncols + j] << " ";
std::cout << "]\n";
}
return 0;
}
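With Nrows = 10 and Ncols = 3 as set above, the expand call replicates the sequence {0, 1, 2} ten times in row-major order, so the printout should be ten identical lines reading [ 0 1 2 ].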
As an apparently simpler alternative to using CUDA Thrust, I'm posting below a worked example implementing the classic Matlab meshgrid function in CUDA.
In Matlab
x = [1 2 3];
y = [4 5 6 7];
[X, Y] = meshgrid(x, y);
produces
X =
1 2 3
1 2 3
1 2 3
1 2 3
and
Y =
4 4 4
5 5 5
6 6 6
7 7 7
X is exactly the four-fold replication of the x array, which is what the OP asked about and the first guess in Robert Crovella's answer, while Y is the three-fold consecutive replication of each element of the y array, the second guess in Robert Crovella's answer.
Here is the code:
#include <cstdio>
#include <thrust/pair.h>
#include "Utilities.cuh"
#define BLOCKSIZE_MESHGRID_X 16
#define BLOCKSIZE_MESHGRID_Y 16
#define DEBUG
/*******************/
/* MESHGRID KERNEL */
/*******************/
template <class T>
__global__ void meshgrid_kernel(const T * __restrict__ x, size_t Nx, const T * __restrict__ y, size_t Ny, T * __restrict__ X, T * __restrict__ Y)
{
unsigned int tidx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if ((tidx < Nx) && (tidy < Ny)) {
X[tidy * Nx + tidx] = x[tidx];
Y[tidy * Nx + tidx] = y[tidy];
}
}
/************/
/* MESHGRID */
/************/
template <class T>
thrust::pair<T *,T *> meshgrid(const T *x, const unsigned int Nx, const T *y, const unsigned int Ny) {
T *X; gpuErrchk(cudaMalloc((void**)&X, Nx * Ny * sizeof(T)));
T *Y; gpuErrchk(cudaMalloc((void**)&Y, Nx * Ny * sizeof(T)));
dim3 BlockSize(BLOCKSIZE_MESHGRID_X, BLOCKSIZE_MESHGRID_Y);
dim3 GridSize (iDivUp(Nx, BLOCKSIZE_MESHGRID_X), iDivUp(Ny, BLOCKSIZE_MESHGRID_Y));
meshgrid_kernel<<<GridSize, BlockSize>>>(x, Nx, y, Ny, X, Y);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
return thrust::make_pair(X, Y);
}
/********/
/* MAIN */
/********/
int main()
{
const int Nx = 3;
const int Ny = 4;
float *h_x = (float *)malloc(Nx * sizeof(float));
float *h_y = (float *)malloc(Ny * sizeof(float));
float *h_X = (float *)malloc(Nx * Ny * sizeof(float));
float *h_Y = (float *)malloc(Nx * Ny * sizeof(float));
for (int i = 0; i < Nx; i++) h_x[i] = i;
for (int i = 0; i < Ny; i++) h_y[i] = i + 4.f;
float *d_x; gpuErrchk(cudaMalloc(&d_x, Nx * sizeof(float)));
float *d_y; gpuErrchk(cudaMalloc(&d_y, Ny * sizeof(float)));
gpuErrchk(cudaMemcpy(d_x, h_x, Nx * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_y, h_y, Ny * sizeof(float), cudaMemcpyHostToDevice));
thrust::pair<float *, float *> meshgrid_pointers = meshgrid(d_x, Nx, d_y, Ny);
float *d_X = (float *)meshgrid_pointers.first;
float *d_Y = (float *)meshgrid_pointers.second;
gpuErrchk(cudaMemcpy(h_X, d_X, Nx * Ny * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_Y, d_Y, Nx * Ny * sizeof(float), cudaMemcpyDeviceToHost));
for (int j = 0; j < Ny; j++) {
for (int i = 0; i < Nx; i++) {
printf("i = %i; j = %i; x = %f; y = %f\n", i, j, h_X[j * Nx + i], h_Y[j * Nx + i]);
}
}
return 0;
}
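Note that this example fills h_x with 0, 1, 2 and h_y with 4, 5, 6, 7, so the printout shows x cycling through 0 1 2 along each row while y stays constant per row: the same replication pattern as the Matlab X and Y above, just with x starting at 0 instead of 1.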