CUDA atomicAdd example fails to yield correct output

The following code is intended to increment each element of a 100-element array of floats by 1, ten times (ten threads each add 1 to every element), so I expected every element of the output array to be 10.0f. Instead, I get random values. Can you please point out my error?
__global__ void testAdd(float *a)
{
    float temp;
    for (int i = 0; i < 100; i++)
    {
        a[i] = atomicAdd(&a[i], 1.0f);
    }
}
void cuTestAtomicAdd(float *a)
{
    testAdd<<<1, 10>>>(a);
}
My goal is to understand the workings of atomic operations, so as to apply them elsewhere.

That's not how we do an atomicAdd operation.
Just do it like this:
atomicAdd(&a[i], 1.0f);
and the variable in question (a[i]) will be updated.
The return value from an atomic function is generally the old value that was in the variable, before the atomic update.
so doing this:
a[i] = atomicAdd(&a[i], 1.0f);
will atomically update the variable a[i], and then (non-atomically) store the returned old value back into a[i]. Since ten threads are doing this concurrently, that plain store races with the other threads' atomic updates and overwrites them, which is why you see garbage. That's almost certainly not what you want.
Read the documentation:
The function returns old.
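To see what "returns old" means, here is an untested sketch of how a float atomicAdd can be emulated with atomicCAS (following the pattern the CUDA Programming Guide shows for double; myAtomicAdd is an illustrative name):
// Software emulation of float atomicAdd via atomicCAS. Note the return
// value: it is the value the location held *before* this thread's update.
__device__ float myAtomicAdd(float *address, float val)
{
    unsigned int *address_as_ui = (unsigned int *)address;
    unsigned int old = *address_as_ui, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ui, assumed,
                        __float_as_uint(val + __uint_as_float(assumed)));
    } while (assumed != old); // retry if another thread intervened
    return __uint_as_float(old);
}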
The following complete code demonstrates correct usage:
#include <iostream>
#include <cstdio>
__global__ void testAdd(float *a)
{
    for (int i = 0; i < 100; i++)
    {
        atomicAdd(&a[i], 1.0f);
    }
}
void cuTestAtomicAdd(float *a)
{
    testAdd<<<1, 10>>>(a);
}
int main(){
    float *d_data, *h_data;
    h_data = (float *)malloc(100 * sizeof(float));
    cudaMalloc((void **)&d_data, 100 * sizeof(float));
    cudaMemset(d_data, 0, 100 * sizeof(float));
    cuTestAtomicAdd(d_data);
    cudaMemcpy(h_data, d_data, 100 * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 100; i++)
        if (h_data[i] != 10.0f) { printf("mismatch at %d, was %f, should be %f\n", i, h_data[i], 10.0f); return 1; }
    printf("Success\n");
    return 0;
}


deep copy of structs to and from device memory

I am having trouble with the deep copy of an array of structs with dynamically allocated member variables in this CUDA code. I think it is occurring because &deviceHistogram points to an address on the host instead of an address on the device. I tried making an intermediate pointer variable, as in here, but that did not work; how do I properly copy this entire array of structs so that I can modify it from the makeHistogram function?
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"

typedef struct histogramBin {
    int* items;
    int count;
} histogramBin;

__host__ __device__ void outputHistogram(histogramBin* histogram, int size) {
    for (int i = 0; i < size; i++) {
        printf("%d: ", i);
        if (!histogram[i].count) {
            printf("EMPTY");
        } else {
            for (int j = 0; j < histogram[i].count; j++) {
                printf("%d ", histogram[i].items[j]);
            }
        }
        printf("\n");
    }
}

// This function embeds CUDA PTX code to extract a bit field from x.
__device__ uint bfe(uint x, uint start, uint nbits) {
    uint bits;
    asm("bfe.u32 %0, %1, %2, %3;"
        : "=r"(bits)
        : "r"(x), "r"(start), "r"(nbits));
    return bits;
}

__global__ void makeHistogram(histogramBin** histogram, int* rH, int rSize, int bit) {
    for (int r = 0; r < rSize; r++) {
        int thisBin = bfe(rH[r], bit, 1);
        int position = (*histogram)[thisBin].count; // **** out-of-bounds memory access here ****
        (*histogram)[thisBin].items[position] = rH[r];
        (*histogram)[thisBin].count++;
    }
}

void histogramDriver(histogramBin* histogram, int* rH, int rSize, int bit) {
    int n = 8;
    int* deviceRH;
    histogramBin* deviceHistogram;
    cudaMalloc((void**)&deviceRH, rSize * sizeof(int));
    cudaMemcpy(deviceRH, rH, rSize * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&deviceHistogram, n * sizeof(histogramBin));
    cudaMemcpy(deviceHistogram, histogram, n * sizeof(histogramBin), cudaMemcpyHostToDevice);
    int* tempData[n];
    for (int i = 0; i < n; i++) {
        cudaMalloc(&(tempData[i]), rSize * sizeof(int));
    }
    for (int i = 0; i < n; i++) {
        cudaMemcpy(&(deviceHistogram[i].items), &(tempData[i]), sizeof(int*), cudaMemcpyHostToDevice);
    }
    for (int i = 0; i < n; i++) {
        cudaMemcpy(tempData[i], histogram[i].items, rSize * sizeof(int), cudaMemcpyHostToDevice);
    }
    makeHistogram<<<1, 1>>>(&deviceHistogram, deviceRH, rSize, bit);
    cudaDeviceSynchronize();
}

int main(){
    int rSize = 5;
    int rH[rSize] = {1, 2, 3, 4, 5};
    histogramBin* histogram = (histogramBin*)malloc(sizeof(histogramBin) * 8);
    for (int i = 0; i < 8; i++){
        histogram[i].items = (int*)calloc(sizeof(int), rSize);
        histogram[i].count = 0;
    }
    histogramDriver(histogram, rH, rSize, 0);
    return 0;
}
Once it has been copied properly to the device, how do I get it back on the host? For example, if I call outputHistogram(histogram, 5); from inside makeHistogram, I see the following:
0: 2 4
1: 1 3 5
2: EMPTY
3: EMPTY
4: EMPTY
5: EMPTY
6: EMPTY
7: EMPTY
Which is the output I am expecting.
When I call outputHistogram(histogram, 8) from histogramDriver (after the cudaDeviceSynchronize()) I see the following:
0: EMPTY
1: EMPTY
2: EMPTY
3: EMPTY
4: EMPTY
5: EMPTY
6: EMPTY
7: EMPTY
Clearly I am not properly copying the values back from the device to the host.
I have tried copying by doing the reverse procedure from the one in histogramDriver:
for (int i = 0; i < n; i++){
    cudaMemcpy(&(tempData[i]), &(deviceHistogram[i].items), sizeof(int*), cudaMemcpyDeviceToHost);
}
for (int i = 0; i < n; i++) {
    cudaMemcpy(histogram[i].items, tempData[i], rSize * sizeof(int), cudaMemcpyDeviceToHost);
}
But the output from the outputHistogram call in histogramDriver remains unchanged.
As @talonmies indicated, the biggest problem here is the design of your kernel. There is no reason/need to use a double pointer for histogram (and indeed, the first iteration of the code you posted did not have that in the kernel prototype, although it was incomplete).
By removing the double-pointer aspect, your code runs without any runtime errors.
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"

typedef struct histogramBin {
    int* items;
    int count;
} histogramBin;

// This function embeds CUDA PTX code to extract a bit field from x.
__device__ uint bfe(uint x, uint start, uint nbits) {
    uint bits;
    asm("bfe.u32 %0, %1, %2, %3;"
        : "=r"(bits)
        : "r"(x), "r"(start), "r"(nbits));
    return bits;
}

__global__ void makeHistogram(histogramBin* histogram, int* rH, int rSize, int bit) {
    for (int r = 0; r < rSize; r++) {
        int thisBin = bfe(rH[r], bit, 1);
        int position = histogram[thisBin].count;
        histogram[thisBin].items[position] = rH[r];
        histogram[thisBin].count++;
    }
}

void histogramDriver(histogramBin* histogram, int* rH, int rSize, int bit) {
    int n = 8;
    int* deviceRH;
    histogramBin* deviceHistogram;
    cudaMalloc((void**)&deviceRH, rSize * sizeof(int));
    cudaMemcpy(deviceRH, rH, rSize * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&deviceHistogram, n * sizeof(histogramBin));
    cudaMemcpy(deviceHistogram, histogram, n * sizeof(histogramBin), cudaMemcpyHostToDevice);
    int* tempData[n];
    for (int i = 0; i < n; i++) {
        cudaMalloc(&(tempData[i]), rSize * sizeof(int));
    }
    for (int i = 0; i < n; i++) {
        cudaMemcpy(&(deviceHistogram[i].items), &(tempData[i]), sizeof(int*), cudaMemcpyHostToDevice);
    }
    for (int i = 0; i < n; i++) {
        cudaMemcpy(tempData[i], histogram[i].items, rSize * sizeof(int), cudaMemcpyHostToDevice);
    }
    makeHistogram<<<1, 1>>>(deviceHistogram, deviceRH, rSize, bit);
    cudaDeviceSynchronize();
}

int main(){
    const int rSize = 5;
    int rH[rSize] = {1, 2, 3, 4, 5};
    histogramBin* histogram = (histogramBin*)malloc(sizeof(histogramBin) * 8);
    for (int i = 0; i < 8; i++){
        histogram[i].items = (int*)calloc(sizeof(int), rSize);
        histogram[i].count = 0;
    }
    histogramDriver(histogram, rH, rSize, 0);
    return 0;
}
$ nvcc t1452.cu -o t1452
$ cuda-memcheck ./t1452
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors
$
Note that the only changes here are to the kernel code itself, the removal of the ampersand on the kernel call, and the addition of const to the definition of rSize to get things to compile.
I have no idea if it produces correct output, because you've included no way to inspect the output, nor indicated what you expect the output to be. If you are interested in that, those would be good things to include in your MVE.
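As for getting the data back to the host: once the double pointer is removed, the reverse procedure you tried should work against deviceHistogram. An untested sketch, placed after the cudaDeviceSynchronize() in histogramDriver, that also retrieves the per-bin counts:
// Sketch only: tempData[i] still holds the device pointer to bin i's items,
// so copy each items buffer back, along with the updated count for each bin.
for (int i = 0; i < n; i++) {
    cudaMemcpy(histogram[i].items, tempData[i], rSize * sizeof(int),
               cudaMemcpyDeviceToHost);
    cudaMemcpy(&(histogram[i].count), &(deviceHistogram[i].count),
               sizeof(int), cudaMemcpyDeviceToHost);
}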

Can't get matrix*vector multiplication to go faster in CUDA than in CPU

#include <iostream>
#include <assert.h>
#include <cstdlib>
#include <ctime>
#include <sys/time.h>

#define BLOCK_SIZE 32 // CUDA block size

__device__ inline int getValFromMatrix(int* matrix, int row, int col, int matSize) {
    if (row < matSize && col < matSize) { return matrix[row * matSize + col]; }
    return 0;
}

__device__ inline int getValFromVector(int* vector, int row, int matSize) {
    if (row < matSize) { return vector[row]; }
    return 0;
}

__global__ void matVecMultCUDAKernel(int* aOnGPU, int* bOnGPU, int* cOnGPU, int matSize) {
    __shared__ int aRowShared[BLOCK_SIZE];
    __shared__ int bShared[BLOCK_SIZE];
    __shared__ int myRow;
    __shared__ double rowSum;
    int myIndexInBlock = threadIdx.x;
    myRow = blockIdx.x;
    rowSum = 0;
    for (int m = 0; m < (matSize / BLOCK_SIZE + 1); m++) {
        aRowShared[myIndexInBlock] = getValFromMatrix(aOnGPU, myRow, m * BLOCK_SIZE + myIndexInBlock, matSize);
        bShared[myIndexInBlock] = getValFromVector(bOnGPU, m * BLOCK_SIZE + myIndexInBlock, matSize);
        __syncthreads(); // Sync threads to make sure all threads in the block have written aRowShared and bShared
        if (myIndexInBlock == 0) {
            for (int k = 0; k < BLOCK_SIZE; k++) {
                rowSum += aRowShared[k] * bShared[k];
            }
        }
    }
    if (myIndexInBlock == 0) { cOnGPU[myRow] = rowSum; }
}

static inline void cudaCheckReturn(cudaError_t result) {
    if (result != cudaSuccess) {
        std::cerr << "CUDA Runtime Error: " << cudaGetErrorString(result) << std::endl;
        assert(result == cudaSuccess);
    }
}

static void matVecMultCUDA(int* aOnGPU, int* bOnGPU, int* cOnGPU, int* c, int sizeOfc, int matSize) {
    matVecMultCUDAKernel<<<matSize, BLOCK_SIZE>>>(aOnGPU, bOnGPU, cOnGPU, matSize); // Launch 1 block per row
    cudaCheckReturn(cudaMemcpy(c, cOnGPU, sizeOfc, cudaMemcpyDeviceToHost));
}

static void matVecMult(int** A, int* b, int* c, int matSize) {
    // Sequential implementation:
    for (int i = 0; i < matSize; i++) {
        c[i] = 0;
        for (int j = 0; j < matSize; j++) {
            c[i] += (A[i][j] * b[j]);
        }
    }
}

int main() {
    int matSize = 1000;
    int **A, *b, *c;
    int *aOnGPU, *bOnGPU, *cOnGPU;
    A = new int*[matSize];
    for (int i = 0; i < matSize; i++) { A[i] = new int[matSize](); }
    b = new int[matSize]();
    c = new int[matSize]();
    int aSizeOnGPU = matSize * matSize * sizeof(int), bcSizeOnGPU = matSize * sizeof(int);
    cudaCheckReturn(cudaMalloc(&aOnGPU, aSizeOnGPU)); // cudaMallocPitch?
    cudaCheckReturn(cudaMalloc(&bOnGPU, bcSizeOnGPU));
    cudaCheckReturn(cudaMalloc(&cOnGPU, bcSizeOnGPU));
    srand(time(NULL));
    for (int i = 0; i < matSize; i++) {
        b[i] = rand() % 100;
        for (int j = 0; j < matSize; j++) {
            A[i][j] = rand() % 100;
        }
    }
    for (int i = 0; i < matSize; i++) { cudaCheckReturn(cudaMemcpy((aOnGPU + i * matSize), A[i], bcSizeOnGPU, cudaMemcpyHostToDevice)); }
    cudaCheckReturn(cudaMemcpy(bOnGPU, b, bcSizeOnGPU, cudaMemcpyHostToDevice));
    int iters = 1;
    timeval start, end;
    // Sequential run:
    gettimeofday(&start, NULL);
    for (int i = 0; i < iters; i++) { matVecMult(A, b, c, matSize); }
    gettimeofday(&end, NULL);
    std::cout << (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec) << std::endl;
    // CUDA run:
    gettimeofday(&start, NULL);
    for (int i = 0; i < iters; i++) { matVecMultCUDA(aOnGPU, bOnGPU, cOnGPU, c, bcSizeOnGPU, matSize); }
    gettimeofday(&end, NULL);
    std::cout << (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec) << std::endl;
    cudaCheckReturn(cudaFree(aOnGPU));
    cudaCheckReturn(cudaFree(bOnGPU));
    cudaCheckReturn(cudaFree(cOnGPU));
    for (int i = 0; i < matSize; ++i) {
        delete[] A[i];
    }
    delete[] A;
    delete[] b;
    delete[] c;
}
Gives:
267171
580253
I've followed the guide at http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory on how to do a matrix multiplication. I used shared memory for both the matrix (A) and the vector (B), but no matter what matrix size (100*100 to 20000*20000) or block size (32 to 1024) I choose, the sequential implementation always outperforms the CUDA implementation; it is about twice as fast.
Since this is matrix*vector multiplication, the shared arrays and blocks are handled a bit differently; I'm using one block per row of the matrix instead of a 2D block over a part of the matrix.
Is my implementation wrong, or is CUDA simply not faster than the CPU?
First: you perform boundary checks in the CUDA implementation that you don't perform on the CPU. Branching is really expensive on a GPU.
Second: you count the cudaMemcpy in the CUDA timing. It's very uncommon to perform only one multiplication before having to get the result back to the CPU.
Usually (in CG, i.e. conjugate gradient solvers, for example), you perform several hundred multiplications on the GPU before having to copy back.
Third: don't try to implement this yourself (except for educational purposes); use vendor libraries (like cuBLAS, which ships with every CUDA release), which are extremely hard to outperform.
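For example, an untested sketch of the cuBLAS route, assuming float data rather than the int data above (matVecMultCublas is an illustrative name; link with -lcublas):
#include <cublas_v2.h>

// Computes c = A * b on the device with cuBLAS (float data assumed).
void matVecMultCublas(const float* aOnGPU, const float* bOnGPU, float* cOnGPU, int matSize) {
    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // cuBLAS assumes column-major storage; CUBLAS_OP_T makes it treat the
    // row-major matrix above as its transpose, yielding the intended A * b.
    cublasSgemv(handle, CUBLAS_OP_T, matSize, matSize, &alpha,
                aOnGPU, matSize, bOnGPU, 1, &beta, cOnGPU, 1);
    cublasDestroy(handle);
}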

In C programming, how do I use the values of an array outside the function where they are assigned?

In my program I used a function called
void printBoard(char board[26][26], int n);
This is my actual code:
#include <stdio.h>
#include <stdlib.h>

void printBoard(char board[26][26], int n);

int main(void) {
    //variable 'n' determines board dimension
    int n;
    printf("Enter the board dimension: ");
    scanf("%d", &n);
    char board[n][n];
    printBoard(board, n);
    printf("\n\n");
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            printf("%c", board[i][j]);
        }
        printf("\n");
    }
    return (EXIT_SUCCESS);
}

void printBoard(char board[26][26], int n){
    printf(" ");
    for (int i = 0; i < n; i++){
        printf("%c", 97 + i);
    }
    printf("\n");
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            board[i][j] = 'U';
        }
    }
    board[(n/2)-1][(n/2)-1] = 'W';
    board[n/2][n/2] = 'W';
    board[(n/2)-1][n/2] = 'B';
    board[n/2][(n/2)-1] = 'B';
    for (int i = 0; i < n; i++){
        printf("%c ", 97 + i);
        for (int j = 0; j < n; j++){
            printf("%c", board[i][j]);
        }
        printf("\n");
    }
}
The values of the array board[26][26] get assigned inside the function printBoard, so the values are lost once I am outside the scope of that function. But I need to use the values of this array outside of the function. How can I do that?
Thank you
You're not passing the array into printBoard by value; you're just passing a pointer. The changes made in the function persist beyond the function because you're modifying the same memory, not a copy. If you're seeing unexpected output, it's not because of a scope issue with changes made by printBoard.
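For example (a minimal sketch with a fixed 3x3 size for brevity), the array argument decays to a pointer, so the callee writes straight into the caller's array:
#include <stdio.h>

/* The parameter is really a pointer: char (*board)[3]. */
void fill(char board[3][3], int n) {
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            board[i][j] = 'U';
}

int main(void) {
    char board[3][3];
    fill(board, 3);
    printf("%c\n", board[1][1]); /* prints 'U': the change persisted */
    return 0;
}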

Thrust: selectively move elements to another vector

I'm trying to figure out the best way to do the following with Thrust: vector A has a million floats in some particular order. I want to move to vector B every element x in A for which x > 7.0, such that the order of elements is maintained in both A and B. Importantly, only a tiny fraction of the elements need to be moved. Efficiency matters more to me than elegance.
My idea was to use thrust::copy_if from A to B and then thrust::remove_if on A. But I don't know the exact number of elements to be copied, and since the memory for B apparently must be allocated in advance, another counting operation is necessary. An inelegant way to skip the counting operation is to pre-allocate "enough" memory for vector B.
Using thrust::remove_copy_if has much the same problem: you need to allocate the memory for B in advance, and it doesn't actually remove anything from A, so another thrust::remove_if is required anyway.
Another idea I had was to use thrust::stable_sort with a custom comparison functor to push all the elements I want out to the end of A, then somehow figure out how many there are and thrust::copy them to B. This also looks pretty inelegant...
You're on the right track with thrust::copy_if. Just allocate two more buffers of the same size as the first one, then copy_if the elements > 7.0f to the first and the elements <= 7.0f to the second. Allocating buffers of the same size as the original is fine as long as you know there's room, and 1 million floats only take up 4MB.
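An untested sketch of that approach, adapted to your A/B setup (functor and wrapper names are illustrative), which also shrinks B to the actual count and compacts A afterwards:
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/remove.h>

struct above_seven
{
    __host__ __device__ bool operator()(float x) const { return x > 7.0f; }
};

// Moves every element > 7.0f from A into B, preserving order in both.
void splitOff(thrust::device_vector<float>& A, thrust::device_vector<float>& B)
{
    B.resize(A.size()); // worst-case allocation, shrunk below
    thrust::device_vector<float>::iterator b_end =
        thrust::copy_if(A.begin(), A.end(), B.begin(), above_seven());
    B.resize(b_end - B.begin());
    // remove_if preserves the relative order of the surviving elements
    A.erase(thrust::remove_if(A.begin(), A.end(), above_seven()), A.end());
}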
Edit:
I did a performance comparison of the copy_if and stable_partition approaches. On my card, a GTX660, stable_partition took around 150% as long as copy_if for "split" values of 0.1f, 0.5f and 0.9f. I added tests to ensure that both methods are stable (maintain the order of the values).
#include <cuda.h>
#include <curand.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/partition.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cassert>

#define CHECK_CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__);\
    return EXIT_FAILURE;}} while(0)
#define CHECK_CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__);\
    return EXIT_FAILURE;}} while(0)

#define SPLIT 0.1f

struct is_low
{
    __host__ __device__ bool operator()(const float x)
    {
        return x <= SPLIT;
    }
};

struct is_high
{
    __host__ __device__ bool operator()(const float x)
    {
        return x > SPLIT;
    }
};

class EventTimer {
public:
    EventTimer() : mStarted(false), mStopped(false) {
        cudaEventCreate(&mStart);
        cudaEventCreate(&mStop);
    }
    ~EventTimer() {
        cudaEventDestroy(mStart);
        cudaEventDestroy(mStop);
    }
    void start(cudaStream_t s = 0) {
        cudaEventRecord(mStart, s);
        mStarted = true;
        mStopped = false;
    }
    void stop(cudaStream_t s = 0) {
        assert(mStarted);
        cudaEventRecord(mStop, s);
        mStarted = false;
        mStopped = true;
    }
    float elapsed() {
        assert(mStopped);
        if (!mStopped) return 0;
        cudaEventSynchronize(mStop);
        float elapsed = 0;
        cudaEventElapsedTime(&elapsed, mStart, mStop);
        return elapsed;
    }
private:
    bool mStarted, mStopped;
    cudaEvent_t mStart, mStop;
};

int main(int argc, char *argv[])
{
    const size_t n = 1024 * 1024 * 50;

    // Create prng
    curandGenerator_t gen;
    CHECK_CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
    // Set seed
    CHECK_CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL));

    // Generate n floats on device
    thrust::device_vector<float> vec_rnd_d(n);
    float* ptr_rnd_d = thrust::raw_pointer_cast(vec_rnd_d.data());
    CHECK_CURAND_CALL(curandGenerateUniform(gen, ptr_rnd_d, n));

    thrust::device_vector<float> vec_low_d(n);
    thrust::device_vector<float> vec_high_d(n);

    for (int i = 0; i < 5; ++i) {
        EventTimer timer;
        timer.start();
        thrust::device_vector<float>::iterator iter_end;
        iter_end = thrust::copy_if(vec_rnd_d.begin(), vec_rnd_d.end(), vec_low_d.begin(), is_low());
        thrust::copy_if(vec_rnd_d.begin(), vec_rnd_d.end(), vec_high_d.begin(), is_high());
        timer.stop();
        std::cout << "copy_if: " << timer.elapsed() << "ms" << std::endl;

        // check result
        thrust::host_vector<float> vec_rnd_h = vec_rnd_d;
        thrust::host_vector<float> vec_low_h = vec_low_d;
        thrust::host_vector<float> vec_high_h = vec_high_d;
        thrust::host_vector<float>::iterator low_iter_h = vec_low_h.begin();
        thrust::host_vector<float>::iterator high_iter_h = vec_high_h.begin();
        for (thrust::host_vector<float>::iterator rnd_iter_h = vec_rnd_h.begin();
             rnd_iter_h != vec_rnd_h.end(); ++rnd_iter_h) {
            if (*rnd_iter_h <= SPLIT) {
                assert(*low_iter_h == *rnd_iter_h);
                ++low_iter_h;
            }
            else {
                assert(*high_iter_h == *rnd_iter_h);
                ++high_iter_h;
            }
        }
    }

    for (int i = 0; i < 5; ++i) {
        thrust::device_vector<float> vec_rnd_copy = vec_rnd_d;
        EventTimer timer;
        timer.start();
        thrust::device_vector<float>::iterator iter_split =
            thrust::stable_partition(vec_rnd_copy.begin(), vec_rnd_copy.end(), is_low());
        timer.stop();
        size_t n_low = iter_split - vec_rnd_copy.begin();
        std::cout << "stable_partition: " << timer.elapsed() << "ms" << std::endl;

        // check result
        thrust::host_vector<float> vec_rnd_h = vec_rnd_d;
        thrust::host_vector<float> vec_partitioned_h = vec_rnd_copy;
        thrust::host_vector<float>::iterator low_iter_h = vec_partitioned_h.begin();
        thrust::host_vector<float>::iterator high_iter_h = vec_partitioned_h.begin() + n_low;
        for (thrust::host_vector<float>::iterator rnd_iter_h = vec_rnd_h.begin();
             rnd_iter_h != vec_rnd_h.end(); ++rnd_iter_h) {
            if (*rnd_iter_h <= SPLIT) {
                assert(*low_iter_h == *rnd_iter_h);
                ++low_iter_h;
            }
            else {
                assert(*high_iter_h == *rnd_iter_h);
                ++high_iter_h;
            }
        }
    }

    CHECK_CURAND_CALL(curandDestroyGenerator(gen));
    return EXIT_SUCCESS;
}
Output:
C:\rd\projects\cpp\test_cuda\Release>test_cuda.exe
copy_if: 40.2919ms
copy_if: 38.0157ms
copy_if: 38.5036ms
copy_if: 37.6751ms
copy_if: 38.1054ms
stable_partition: 59.5473ms
stable_partition: 61.4016ms
stable_partition: 59.1854ms
stable_partition: 61.3195ms
stable_partition: 59.1205ms
To answer my own question, I finally found thrust::stable_partition, which is more efficient and elegant than all the "copy_if" alternatives. It just moves all elements that fail to satisfy a predicate to the end of the array and returns the start of the second sequence. Pointer arithmetic gives the size of B, but in fact it's not necessary anymore:
thrust::device_vector<float>::iterator iter = thrust::stable_partition(A.begin(), A.end(), pred);
thrust::device_vector<float> B(iter, A.end());
A.erase(iter, A.end());

thrust::device_reference can't be used with printf?

I am using the Thrust partition function to partition an array into even and odd numbers. However, when I try to display the device vector, it shows random values. Please let me know where the error is. I think I have done everything correctly.
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/partition.h>

struct is_even
{
    //const int toCom;
    //is_even(int val):toCom(val){}
    __device__
    bool operator()(const int &x)
    {
        return x % 2;
    }
};

void main(){
    thrust::host_vector<int> H(6);
    for (int i = 0; i < H.size(); i++){
        H[i] = i + 1;
    }
    thrust::device_vector<int> D = H;
    thrust::partition(D.begin(), D.end(), is_even());
    for (int i = 0; i < D.size(); i++){
        printf("%d,", D[i]);
    }
    getchar();
}
You can't send a thrust::device_reference (i.e., the result of D[i]) through printf's ellipsis because it is not a POD type. See the documentation. Your code will produce a compiler warning to this effect.
Cast to int first:
for (int i = 0; i < D.size(); ++i)
{
    printf("%d,", (int) D[i]);
}
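Alternatively, since each D[i] access triggers a separate device-to-host transfer, for larger vectors it can be cheaper to copy the whole vector to the host once and print from there, for example:
// Copy once, then print from host memory: one transfer instead of one per element.
thrust::host_vector<int> result = D;
for (int i = 0; i < (int) result.size(); ++i)
{
    printf("%d,", result[i]);
}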