I have run into an issue I do not understand. I am trying to set the values of an array on the device. With an int array I am doing it this way:
#define VECTOR_SIZE 8
int main()
{
printf("Start\n");
int *input_d;
int *output_d;
int output_h[VECTOR_SIZE];
int input_h[VECTOR_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
int size = VECTOR_SIZE*sizeof(int);
cudaMalloc(&input_d,size);
cudaMalloc(&output_d,size);
cudaMemcpy(input_d,input_h,size,cudaMemcpyHostToDevice);
kernel<<<1,VECTOR_SIZE>>>(input_d,output_d);
cudaMemcpy(output_h,output_d,size, cudaMemcpyDeviceToHost);
cudaFree(input_d);
cudaFree(output_d);
return 0;
}
The kernel looks like:
__global__ void kernel(int* input, int* output)
{
int dx = threadIdx.x + (blockDim.x * blockIdx.x);
output[dx] = dx;
}
The output (output_h) is just what I expected: {0, 1, 2, 3, 4, 5, 6, 7}. Now, when I try to do the same with a float array:
#define VECTOR_SIZE 8
int main()
{
printf("Start\n");
float *input_d;
float *output_d;
float output_h[VECTOR_SIZE];
float input_h[VECTOR_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
int size = VECTOR_SIZE*sizeof(float);
cudaMalloc(&input_d,size);
cudaMalloc(&output_d,size);
cudaMemcpy(input_d,input_h,size,cudaMemcpyHostToDevice);
kernel<<<1,VECTOR_SIZE>>>(input_d,output_d);
cudaMemcpy(output_h,output_d,size, cudaMemcpyDeviceToHost);
cudaFree(input_d);
cudaFree(output_d);
return 0;
}
with kernel:
__global__ void kernel(float* input, float* output)
{
int dx = threadIdx.x + (blockDim.x * blockIdx.x);
output[dx] = dx;
}
I receive an array of zeros in the output_h variable.
The full code for handling float arrays:
#include "cuda_runtime.h"
#include <stdio.h>
#define VECTOR_SIZE 8
__global__ void kernel(float* input, float* output)//, int halfSize)
{
int dx = threadIdx.x + (blockDim.x * blockIdx.x);
output[dx] = dx;
}
int main()
{
printf("Start\n");
float *input_d;
float *output_d;
float output_h[VECTOR_SIZE];
float input_h[VECTOR_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
int size = VECTOR_SIZE*sizeof(float);
cudaMalloc(&input_d,size);
cudaMalloc(&output_d,size);
cudaMemcpy(input_d,input_h,size,cudaMemcpyHostToDevice);
kernel<<<1,VECTOR_SIZE>>>(input_d,output_d);
cudaMemcpy(output_h,output_d,size, cudaMemcpyDeviceToHost);
cudaFree(input_d);
cudaFree(output_d);
int i;
for (i=1; i<=VECTOR_SIZE; i++)
{
printf("%d, ", output_h[i-1]);
}
getchar();
return 0;
}
Both the integer and floating-point versions of the CUDA code you have posted work perfectly. The only mistake is in how you are printing out the values returned by the kernel in the floating-point case:
int i;
for (i=1; i<=VECTOR_SIZE; i++)
{
printf("%d, ", output_h[i-1]);
}
should be changed to
int i;
for (i=0; i<VECTOR_SIZE; i++)
{
printf("%f, ", output_h[i]);
}
(note that the %f format specifier is required for printing floating-point values; passing a double to printf where %d expects an int is undefined behaviour, which is why you see meaningless values, zeros here, instead of the expected numbers).
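As an aside, not part of the original answer: mismatches like this are easy to catch at compile time. Assuming gcc or clang as the host compiler, passing -Wall through nvcc enables the host compiler's format checking, which warns when a double argument is paired with %d (the file name below is just a placeholder):
$ nvcc -Xcompiler -Wall float_version.cu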
Given that CUDA uses a C++ compiler for host code by default, you should probably prefer iostream to printf - it will work irrespective of the type of the output and not cause the error you are seeing. If I were to write a "universal" version of your example it would look like this:
#include <iostream>
template<typename T>
__global__ void kernel(T* output)
{
int dx = threadIdx.x + (blockDim.x * blockIdx.x);
output[dx] = dx;
}
template<typename T, int VECTOR_SIZE>
void do_run(void)
{
T *output_d;
T output_h[VECTOR_SIZE] = { 999 };
size_t size = sizeof(output_h);
cudaMalloc(&output_d,size);
kernel<T><<<1,VECTOR_SIZE>>>(output_d);
cudaMemcpy(output_h, output_d, size, cudaMemcpyDeviceToHost);
for(int i=0; i<VECTOR_SIZE; i++)
std::cout << output_h[i] << std::endl;
cudaFree(output_d);
}
int main()
{
std::cout << "Integer version" << std::endl;
do_run<int, 8>();
std::cout << "floating point version" << std::endl;
do_run<float, 8>();
return 0;
}
Note that the output code can be used unchanged for both int and float versions, eliminating the possibility of the mistake you made here.
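One further suggestion, also not part of the original answer: the templated example has no error checking, so a bad launch configuration would go unnoticed. Here is a minimal sketch of a check helper that could be called right after the kernel launch in do_run(); it only uses the standard runtime calls cudaGetLastError, cudaDeviceSynchronize and cudaGetErrorString, plus the already-included iostream:
#include <cuda_runtime.h>
// Sketch: surface launch-time and execution-time errors with a readable message.
static void checkLastCudaCall(const char* what)
{
    cudaError_t err = cudaGetLastError();   // reports launch-configuration errors
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();      // reports errors raised while the kernel ran
    if (err != cudaSuccess)
        std::cerr << what << " failed: " << cudaGetErrorString(err) << std::endl;
}
For example, checkLastCudaCall("kernel") placed after kernel<T><<<1,VECTOR_SIZE>>>(output_d) would report an invalid block size instead of silently producing stale output.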
Related
I am having trouble with the deep copy of an array of structs with dynamically allocated member variables in this CUDA code. I think it is occurring because &deviceHistogram points to an address on the host instead of an address on the device. I tried making an intermediate pointer variable as suggested here, but that did not work. How do I properly copy this entire array of structs so that I can modify it from the makeHistogram function?
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
typedef struct histogramBin {
int* items;
int count;
} histogramBin;
__host__ __device__ void outputHistogram(histogramBin* histogram, int size) {
for (int i = 0; i < size; i++) {
printf("%d: ", i);
if (!histogram[i].count) {
printf("EMPTY");
} else {
for (int j = 0; j < histogram[i].count; j++) {
printf("%d ", histogram[i].items[j]);
}
}
printf("\n");
}
}
// This function uses inline PTX (bfe.u32) to extract a bit field from x.
__device__ uint bfe(uint x, uint start, uint nbits) {
uint bits;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(bits)
: "r"(x), "r"(start), "r"(nbits));
return bits;
}
__global__ void makeHistogram(histogramBin** histogram, int* rH, int rSize, int bit) {
for (int r = 0; r < rSize; r++) {
int thisBin = bfe(rH[r], bit, 1);
int position = (*histogram)[thisBin].count; // **** out of memory access here****
(*histogram)[thisBin].items[position] = rH[r];
(*histogram)[thisBin].count++;
}
}
void histogramDriver(histogramBin* histogram, int* rH, int rSize, int bit) {
int n = 8;
int* deviceRH;
histogramBin* deviceHistogram;
cudaMalloc((void**)&deviceRH, rSize * sizeof(int));
cudaMemcpy(deviceRH, rH, rSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&deviceHistogram, n * sizeof(histogramBin));
cudaMemcpy(deviceHistogram, histogram, n * sizeof(histogramBin), cudaMemcpyHostToDevice);
int* tempData[n];
for (int i = 0; i < n; i++) {
cudaMalloc(&(tempData[i]), rSize * sizeof(int));
}
for (int i = 0; i < n; i++) {
cudaMemcpy(&(deviceHistogram[i].items), &(tempData[i]), sizeof(int*), cudaMemcpyHostToDevice);
}
for (int i = 0; i < n; i++) {
cudaMemcpy(tempData[i], histogram[i].items, rSize * sizeof(int), cudaMemcpyHostToDevice);
}
makeHistogram<<<1, 1>>>(&deviceHistogram, deviceRH, rSize, bit);
cudaDeviceSynchronize();
}
int main(){
int rSize = 5;
int rH[rSize] = {1, 2, 3, 4, 5};
histogramBin * histogram = (histogramBin*)malloc(sizeof(histogramBin) * 8);
for(int i = 0; i < 8; i++){
histogram[i].items = (int*)calloc(sizeof(int), rSize);
histogram[i].count = 0;
}
histogramDriver(histogram, rH, rSize, 0);
return 0;
}
Once it has been copied properly to the device, how do I get it back on the host? For example, if I call outputHistogram(histogram, 5); from inside makeHistogram, I see the following:
0: 2 4
1: 1 3 5
2: EMPTY
3: EMPTY
4: EMPTY
5: EMPTY
6: EMPTY
7: EMPTY
Which is the output I am expecting.
When I call outputHistogram(histogram, 8) from histogramDriver (after the cudaDeviceSynchronize()) I see the following:
0: EMPTY
1: EMPTY
2: EMPTY
3: EMPTY
4: EMPTY
5: EMPTY
6: EMPTY
7: EMPTY
Clearly I am not properly copying the values back from the device to the host.
I have tried copying by doing the reverse procedure from the one in histogramDriver:
for(int i = 0; i < n; i++){
cudaMemcpy(&(tempData[i]), &(deviceHistogram[i].items), sizeof(int*), cudaMemcpyDeviceToHost);
}
for (int i = 0; i < n; i++) {
cudaMemcpy(histogram[i].items, tempData[i], rSize * sizeof(int), cudaMemcpyDeviceToHost);
}
But the output from the outputHistogram call in histogramDriver remains unchanged.
As @talonmies indicated, the biggest problem here is the design of your kernel. There is no reason or need to use a double pointer for histogram (and indeed, the first iteration of the code you posted did not have that in the kernel prototype, although it was incomplete).
By removing the double-pointer aspect, your code runs without any runtime errors.
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
typedef struct histogramBin {
int* items;
int count;
} histogramBin;
// This function uses inline PTX (bfe.u32) to extract a bit field from x.
__device__ uint bfe(uint x, uint start, uint nbits) {
uint bits;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(bits)
: "r"(x), "r"(start), "r"(nbits));
return bits;
}
__global__ void makeHistogram(histogramBin* histogram, int* rH, int rSize, int bit) {
for (int r = 0; r < rSize; r++) {
int thisBin = bfe(rH[r], bit, 1);
int position = histogram[thisBin].count;
histogram[thisBin].items[position] = rH[r];
histogram[thisBin].count++;
}
}
void histogramDriver(histogramBin* histogram, int* rH, int rSize, int bit) {
int n = 8;
int* deviceRH;
histogramBin* deviceHistogram;
cudaMalloc((void**)&deviceRH, rSize * sizeof(int));
cudaMemcpy(deviceRH, rH, rSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&deviceHistogram, n * sizeof(histogramBin));
cudaMemcpy(deviceHistogram, histogram, n * sizeof(histogramBin), cudaMemcpyHostToDevice);
int* tempData[n];
for (int i = 0; i < n; i++) {
cudaMalloc(&(tempData[i]), rSize * sizeof(int));
}
for (int i = 0; i < n; i++) {
cudaMemcpy(&(deviceHistogram[i].items), &(tempData[i]), sizeof(int*), cudaMemcpyHostToDevice);
}
for (int i = 0; i < n; i++) {
cudaMemcpy(tempData[i], histogram[i].items, rSize * sizeof(int), cudaMemcpyHostToDevice);
}
makeHistogram<<<1, 1>>>(deviceHistogram, deviceRH, rSize, bit);
cudaDeviceSynchronize();
}
int main(){
const int rSize = 5;
int rH[rSize] = {1, 2, 3, 4, 5};
histogramBin * histogram = (histogramBin*)malloc(sizeof(histogramBin) * 8);
for(int i = 0; i < 8; i++){
histogram[i].items = (int*)calloc(sizeof(int), rSize);
histogram[i].count = 0;
}
histogramDriver(histogram, rH, rSize, 0);
return 0;
}
$ nvcc t1452.cu -o t1452
$ cuda-memcheck ./t1452
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors
$
Note that the only changes here are to the kernel code itself, the removal of the ampersand in the kernel call, and the addition of const to the definition of rSize to get things to compile.
I have no idea if it produces correct output, because you've included no way to inspect the output, nor indicated what you expect the output to be. If you are interested in that, those would be good things to include in your MVE.
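To address the second half of the question (getting the results back to the host), here is a sketch, not part of the original answer, of how the reverse copy could be done at the end of histogramDriver with the single-pointer version above. It relies on the fact that tempData[] still holds the per-bin device items pointers, and it uses the outputHistogram helper from the question:
// Sketch: append after cudaDeviceSynchronize() in histogramDriver.
for (int i = 0; i < n; i++) {
    // copy back only the count member, so the host-side items pointer is not overwritten
    cudaMemcpy(&(histogram[i].count), &(deviceHistogram[i].count), sizeof(int), cudaMemcpyDeviceToHost);
    // copy back the bin contents from the per-bin device buffers
    cudaMemcpy(histogram[i].items, tempData[i], rSize * sizeof(int), cudaMemcpyDeviceToHost);
}
outputHistogram(histogram, n); // should now print the histogram built on the device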
#include <iostream>
#include <assert.h>
#include <sys/time.h>
#define BLOCK_SIZE 32 // CUDA block size
__device__ inline int getValFromMatrix(int* matrix, int row, int col,int matSize) {
if (row<matSize && col<matSize) {return matrix[row*matSize + col];}
return 0;
}
__device__ inline int getValFromVector(int* vector, int row, int matSize) {
if (row<matSize) {return vector[row];}
return 0;
}
__global__ void matVecMultCUDAKernel(int* aOnGPU, int* bOnGPU, int* cOnGPU, int matSize) {
__shared__ int aRowShared[BLOCK_SIZE];
__shared__ int bShared[BLOCK_SIZE];
__shared__ int myRow;
__shared__ double rowSum;
int myIndexInBlock = threadIdx.x;
myRow = blockIdx.x;
rowSum = 0;
for (int m = 0; m < (matSize / BLOCK_SIZE + 1);m++) {
aRowShared[myIndexInBlock] = getValFromMatrix(aOnGPU,myRow,m*BLOCK_SIZE+myIndexInBlock,matSize);
bShared[myIndexInBlock] = getValFromVector(bOnGPU,m*BLOCK_SIZE+myIndexInBlock,matSize);
__syncthreads(); // Sync threads to make sure all fields have been written by all threads in the block to cShared and xShared
if (myIndexInBlock==0) {
for (int k=0;k<BLOCK_SIZE;k++) {
rowSum += aRowShared[k] * bShared[k];
}
}
}
if (myIndexInBlock==0) {cOnGPU[myRow] = rowSum;}
}
static inline void cudaCheckReturn(cudaError_t result) {
if (result != cudaSuccess) {
std::cerr <<"CUDA Runtime Error: " << cudaGetErrorString(result) << std::endl;
assert(result == cudaSuccess);
}
}
static void matVecMultCUDA(int* aOnGPU,int* bOnGPU, int* cOnGPU, int* c, int sizeOfc, int matSize) {
matVecMultCUDAKernel<<<matSize,BLOCK_SIZE>>>(aOnGPU,bOnGPU,cOnGPU,matSize); // Launch 1 block per row
cudaCheckReturn(cudaMemcpy(c,cOnGPU,sizeOfc,cudaMemcpyDeviceToHost));
}
static void matVecMult(int** A,int* b, int* c, int matSize) {
// Sequential implementation:
for (int i=0;i<matSize;i++) {
c[i]=0;
for (int j=0;j<matSize;j++) {
c[i]+=(A[i][j] * b[j]);
}
}
}
int main() {
int matSize = 1000;
int** A,* b,* c;
int* aOnGPU,* bOnGPU,* cOnGPU;
A = new int*[matSize];
for (int i = 0; i < matSize;i++) {A[i] = new int[matSize]();}
b = new int[matSize]();
c = new int[matSize]();
int aSizeOnGPU = matSize * matSize * sizeof(int), bcSizeOnGPU = matSize * sizeof(int);
cudaCheckReturn(cudaMalloc(&aOnGPU,aSizeOnGPU)); // cudaMallocPitch?
cudaCheckReturn(cudaMalloc(&bOnGPU,bcSizeOnGPU));
cudaCheckReturn(cudaMalloc(&cOnGPU,bcSizeOnGPU));
srand(time(NULL));
for (int i=0;i<matSize;i++) {
b[i] = rand()%100;
for (int j=0;j<matSize;j++) {
A[i][j] = rand()%100;
}
}
for (int i=0;i<matSize;i++) {cudaCheckReturn(cudaMemcpy((aOnGPU+i*matSize),A[i],bcSizeOnGPU,cudaMemcpyHostToDevice));}
cudaCheckReturn(cudaMemcpy(bOnGPU,b,bcSizeOnGPU,cudaMemcpyHostToDevice));
int iters=1;
timeval start,end;
// Sequential run:
gettimeofday(&start,NULL);
for (int i=0;i<iters;i++) {matVecMult(A,b,c,matSize);}
gettimeofday(&end,NULL);
std::cout << (end.tv_sec*1000000 + end.tv_usec) - (start.tv_sec*1000000 + start.tv_usec) << std::endl;
// CUDA run:
gettimeofday(&start,NULL);
for (int i=0;i<iters;i++) {matVecMultCUDA(aOnGPU,bOnGPU,cOnGPU,c,bcSizeOnGPU,matSize);}
gettimeofday(&end,NULL);
std::cout << (end.tv_sec*1000000 + end.tv_usec) - (start.tv_sec*1000000 + start.tv_usec) << std::endl;
cudaCheckReturn(cudaFree(aOnGPU));
cudaCheckReturn(cudaFree(bOnGPU));
cudaCheckReturn(cudaFree(cOnGPU));
for (int i = 0; i < matSize; ++i) {
delete[] A[i];
}
delete[] A;
delete[] b;
delete[] c;
}
Gives:
267171
580253
I've followed the guide at http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory on how to do a matrix multiplication. I used shared memory for both the matrix (A) and the vector (B), but no matter what matrix size (100*100 to 20000*20000) or block size (32 to 1024) I choose, the sequential implementation always outperforms the CUDA implementation; it is about twice as fast.
Since I'm doing matrix*vector multiplication, the shared arrays and blocks are handled a bit differently; I'm using one block per row of the matrix instead of a 2D block over a part of the matrix.
Is my implementation wrong, or is simply CUDA not faster than the CPU?
First: you perform boundary checks in the CUDA implementation that you don't perform on the CPU. Branching is really expensive on a GPU.
Second: you count the cudaMemcpy in the CUDA timing. It is very uncommon to perform only one multiplication before having to copy the result back to the CPU.
Usually (in CG, i.e. conjugate gradient solvers, for example), you perform several hundred multiplications on the GPU before having to copy back.
Third: don't implement this yourself (except for educational purposes); use vendor libraries (like cuBLAS, which ships with every CUDA release), which are extremely hard to outperform.
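For reference, here is a sketch of the same y = A*x product written with cuBLAS. This is not the poster's code, and it assumes float data, since cuBLAS has no integer gemv. cuBLAS assumes column-major storage, so the row-major matrix is handled by requesting the transpose:
#include <cublas_v2.h>
// Computes c = A * b for a square, row-major matrix A of size matSize x matSize (device pointers).
void matVecMultCublas(const float* aOnGPU, const float* bOnGPU, float* cOnGPU, int matSize)
{
    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // Interpreted column-major, the row-major buffer holds A^T, so CUBLAS_OP_T yields A*b.
    cublasSgemv(handle, CUBLAS_OP_T,
                matSize, matSize,        // dimensions of the column-major view
                &alpha, aOnGPU, matSize, // matrix and leading dimension
                bOnGPU, 1,               // input vector and stride
                &beta, cOnGPU, 1);       // output vector and stride
    cublasDestroy(handle);
}
Linking requires -lcublas; in real code the handle would be created once and reused across calls.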
I am trying to solve a problem using CUDA Thrust.
I have a host array with 3 elements. Is it possible, using Thrust, to create a device array of 384 elements in which the 3 elements in my host array is repeated 128 times (128 x 3 = 384)?
Generally speaking, starting from an array of 3 elements, how can I use Thrust to generate a device array of size X, where X = Y x 3, i.e. Y is the number of repetitions?
One possible approach:
create a device vector of appropriate size
create 3 strided ranges, one for each of the element positions {1, 2, 3} in the final output (device) vector
use thrust::fill to fill each of the 3 strided ranges with the appropriate (host vector) element {1, 2, 3}
This code is a trivial modification of the strided range example to demonstrate. You can change the REPS define to 128 to see the full expansion to 384 output elements:
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
// for printing
#include <thrust/copy.h>
#include <iostream>
#include <iterator>
#define STRIDE 3
#define REPS 15 // change to 128 if you like
#define DSIZE (STRIDE*REPS)
// this example illustrates how to make strided access to a range of values
// examples:
// strided_range([0, 1, 2, 3, 4, 5, 6], 1) -> [0, 1, 2, 3, 4, 5, 6]
// strided_range([0, 1, 2, 3, 4, 5, 6], 2) -> [0, 2, 4, 6]
// strided_range([0, 1, 2, 3, 4, 5, 6], 3) -> [0, 3, 6]
// ...
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
int main(void)
{
thrust::host_vector<int> h_data(STRIDE);
h_data[0] = 1;
h_data[1] = 2;
h_data[2] = 3;
thrust::device_vector<int> data(DSIZE);
typedef thrust::device_vector<int>::iterator Iterator;
strided_range<Iterator> pos1(data.begin(), data.end(), STRIDE);
strided_range<Iterator> pos2(data.begin()+1, data.end(), STRIDE);
strided_range<Iterator> pos3(data.begin()+2, data.end(), STRIDE);
thrust::fill(pos1.begin(), pos1.end(), h_data[0]);
thrust::fill(pos2.begin(), pos2.end(), h_data[1]);
thrust::fill(pos3.begin(), pos3.end(), h_data[2]);
// print the generated data
std::cout << "data: ";
thrust::copy(data.begin(), data.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl;
return 0;
}
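For completeness, here is a sketch (an alternative, not part of the original answer) of the same tiled replication written with a transform/permutation iterator pair instead of the strided_range helper: the index sequence 0,1,2,0,1,2,... is generated on the fly and used to gather cyclically from the 3-element vector. It needs C++11 (nvcc -std=c++11 on older toolkits) for the auto declaration:
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/copy.h>
#include <iostream>
#include <iterator>
struct mod3 // maps output index i to source index i % 3
{
    __host__ __device__ int operator()(int i) const { return i % 3; }
};
int main(void)
{
    const int REPS = 128;
    thrust::device_vector<int> src(3);
    src[0] = 1; src[1] = 2; src[2] = 3;
    thrust::device_vector<int> out(3 * REPS);
    // permutation_iterator(src, transform_iterator(counting_iterator, mod3)) repeats src cyclically
    auto cyclic = thrust::make_permutation_iterator(
        src.begin(),
        thrust::make_transform_iterator(thrust::make_counting_iterator(0), mod3()));
    thrust::copy(cyclic, cyclic + out.size(), out.begin());
    thrust::copy(out.begin(), out.end(), std::ostream_iterator<int>(std::cout, " "));
    std::cout << std::endl;
    return 0;
}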
Robert Crovella has already answered this question using strided ranges. He has also pointed out the possibility of using the expand operator.
Below, I'm providing a worked example using the expand operator. Unlike the strided-range approach, it avoids the need for explicit loops.
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/sequence.h>
#include <stdio.h>
using namespace thrust::placeholders;
/*************************************/
/* CONVERT LINEAR INDEX TO ROW INDEX */
/*************************************/
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T> {
T Ncols; // --- Number of columns
__host__ __device__ linear_index_to_row_index(T Ncols) : Ncols(Ncols) {}
__host__ __device__ T operator()(T i) { return i / Ncols; }
};
/*******************/
/* EXPAND OPERATOR */
/*******************/
template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
OutputIterator expand(InputIterator1 first1,
InputIterator1 last1,
InputIterator2 first2,
OutputIterator output)
{
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0), thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(), output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output; thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2, output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
/**************************/
/* STRIDED RANGE OPERATOR */
/**************************/
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
/********/
/* MAIN */
/********/
int main(){
/**************************/
/* SETTING UP THE PROBLEM */
/**************************/
const int Nrows = 10; // --- Number of objects
const int Ncols = 3; // --- Number of centroids
thrust::device_vector<int> d_sequence(Nrows * Ncols);
thrust::device_vector<int> d_counts(Ncols, Nrows);
thrust::sequence(d_sequence.begin(), d_sequence.begin() + Ncols);
expand(d_counts.begin(), d_counts.end(), d_sequence.begin(),
thrust::make_permutation_iterator(
d_sequence.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)));
printf("\n\nCentroid indices\n");
for(int i = 0; i < Nrows; i++) {
std::cout << " [ ";
for(int j = 0; j < Ncols; j++)
std::cout << d_sequence[i * Ncols + j] << " ";
std::cout << "]\n";
}
return 0;
}
As an apparently simpler alternative to using CUDA Thrust, I'm posting below a worked example implementing the classical Matlab meshgrid function in CUDA.
In Matlab
x = [1 2 3];
y = [4 5 6 7];
[X, Y] = meshgrid(x, y);
produces
X =
1 2 3
1 2 3
1 2 3
1 2 3
and
Y =
4 4 4
5 5 5
6 6 6
7 7 7
X is exactly the four-fold replication of the x array, which is what the OP asked for and the first interpretation in Robert Crovella's answer, while Y is the three-fold consecutive replication of each element of the y array, which is the second interpretation in Robert Crovella's answer.
Here is the code:
#include <cstdio>
#include <thrust/pair.h>
#include "Utilities.cuh"
#define BLOCKSIZE_MESHGRID_X 16
#define BLOCKSIZE_MESHGRID_Y 16
#define DEBUG
/*******************/
/* MESHGRID KERNEL */
/*******************/
template <class T>
__global__ void meshgrid_kernel(const T * __restrict__ x, size_t Nx, const T * __restrict__ y, size_t Ny, T * __restrict__ X, T * __restrict__ Y)
{
unsigned int tidx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if ((tidx < Nx) && (tidy < Ny)) {
X[tidy * Nx + tidx] = x[tidx];
Y[tidy * Nx + tidx] = y[tidy];
}
}
/************/
/* MESHGRID */
/************/
template <class T>
thrust::pair<T *,T *> meshgrid(const T *x, const unsigned int Nx, const T *y, const unsigned int Ny) {
T *X; gpuErrchk(cudaMalloc((void**)&X, Nx * Ny * sizeof(T)));
T *Y; gpuErrchk(cudaMalloc((void**)&Y, Nx * Ny * sizeof(T)));
dim3 BlockSize(BLOCKSIZE_MESHGRID_X, BLOCKSIZE_MESHGRID_Y);
dim3 GridSize (iDivUp(Nx, BLOCKSIZE_MESHGRID_X), iDivUp(Ny, BLOCKSIZE_MESHGRID_Y));
meshgrid_kernel<<<GridSize, BlockSize>>>(x, Nx, y, Ny, X, Y);
#ifdef DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
return thrust::make_pair(X, Y);
}
/********/
/* MAIN */
/********/
int main()
{
const int Nx = 3;
const int Ny = 4;
float *h_x = (float *)malloc(Nx * sizeof(float));
float *h_y = (float *)malloc(Ny * sizeof(float));
float *h_X = (float *)malloc(Nx * Ny * sizeof(float));
float *h_Y = (float *)malloc(Nx * Ny * sizeof(float));
for (int i = 0; i < Nx; i++) h_x[i] = i;
for (int i = 0; i < Ny; i++) h_y[i] = i + 4.f;
float *d_x; gpuErrchk(cudaMalloc(&d_x, Nx * sizeof(float)));
float *d_y; gpuErrchk(cudaMalloc(&d_y, Ny * sizeof(float)));
gpuErrchk(cudaMemcpy(d_x, h_x, Nx * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_y, h_y, Ny * sizeof(float), cudaMemcpyHostToDevice));
thrust::pair<float *, float *> meshgrid_pointers = meshgrid(d_x, Nx, d_y, Ny);
float *d_X = (float *)meshgrid_pointers.first;
float *d_Y = (float *)meshgrid_pointers.second;
gpuErrchk(cudaMemcpy(h_X, d_X, Nx * Ny * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_Y, d_Y, Nx * Ny * sizeof(float), cudaMemcpyDeviceToHost));
for (int j = 0; j < Ny; j++) {
for (int i = 0; i < Nx; i++) {
printf("i = %i; j = %i; x = %f; y = %f\n", i, j, h_X[j * Nx + i], h_Y[j * Nx + i]);
}
}
return 0;
}
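A note on the example above: "Utilities.cuh" is not posted. Minimal definitions of gpuErrchk and iDivUp that would make it self-contained might look like this (an assumption; the actual header may differ):
#include <cstdio>
#include <cuda_runtime.h>
// ceiling integer division, used to size the launch grid
inline int iDivUp(int a, int b) { return (a + b - 1) / b; }
// standard CUDA error-checking macro around runtime API calls
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess)
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
}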
I'm trying to bind a 2D array to a texture and to interpolate between the data points. My problem is that when I bind my array to the texture, the values I access are total nonsense. Even when I try to access the first value (tex2D(tex, 0.0f, 0.0f)), the result doesn't make sense. So I guess I'm binding it wrong or my memcpy is wrong. Any ideas where my mistake is?
Here is the code:
#include <stdio.h>
#include <iostream>
#include "cuda.h"
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "HelloWorld.h"
#include "linearInterpolation_kernel.cu"
#include "linearInterpolation_kernel2.cu"
#include "linearInterpolation_kernel3.cu"
using namespace std;
using std::cout;
const int blocksize = 16;
__global__
void hello(char *a, int *b) {
a[threadIdx.x] += b[threadIdx.x];
}
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
printf("%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
printf("%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
}
}
int main()
{
int N = 200;
float *A;
A = (float *) malloc(N*sizeof(float));
float *B;
B = (float *) malloc(N*sizeof(float));
float *result;
result = (float *) malloc(N*sizeof(float));
float angle = 0.5f;
for(int i = 0; i < N; i++){
A[i] = (float)rand();
B[i] = (float)rand();
}
cout << A[3] << endl;
cout << B[3] << endl;
ipLinearTexture(A,B,result,angle,N);
float result2;
result2 = (angle)*A[3] + (1-angle)*B[3];
printf(" A %f B %f Result %f\n", A[3], B[3], result[3]);
cout << result2 << endl;
return 1;
}
void ipLinearTexture(float *A, float* B, float* result, float angle, int N)
{
float cuTime;
const int N2 = N;
float *dev_result;
float **AB;
AB = (float **) malloc( N * sizeof(float *));
if(AB)
{
for(int i = 0; i < N; i++)
{
AB[i] = (float *) calloc( 2 , sizeof(float *));
}
}
for (int i = 0; i < N; i++)
{
AB[i][0] = A[i];
AB[i][1] = B[i];
}
cudaMalloc(&dev_result, N * sizeof(float));
unsigned int size = N * 2 * sizeof(float);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray* cu_array;
checkCudaErrors(cudaMallocArray( &cu_array, &channelDesc,N,2 ));
checkCudaErrors(cudaMemcpyToArray( cu_array, 0, 0, AB, size, cudaMemcpyHostToDevice));
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = false; // access with normalized texture coordinates
checkCudaErrors(cudaBindTextureToArray( tex, cu_array, channelDesc));
dim3 dimBlock(10, 1, 1);
dim3 dimGrid((int)ceil((double)N*2/dimBlock.x), 1, 1);
transformKernel3<<< dimGrid, dimBlock, 0 >>>( dev_result, N, 2, angle);
checkCudaErrors(cudaUnbindTexture(tex));
cudaMemcpy(result, dev_result, N * sizeof(float), cudaMemcpyKind::cudaMemcpyDeviceToHost);
result[0] = (float)cuTime;
cout << "==================================================" << endl;
for (int i = 0 ; i < N ;i++)
{
cout << result[i] << endl;
}
cout << "==================================================" << endl;
cudaFree(dev_result);
cudaFreeArray(cu_array);
}
Here is the code inside the kernel:
#ifndef _SIMPLETEXTURE_KERNEL3_H_
#define _SIMPLETEXTURE_KERNEL3_H_
// declare texture reference for 2D float texture
texture<float, 1> tex;
////////////////////////////////////////////////////////////////////////////////
//! Transform an image using texture lookups
//! #param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void
transformKernel3( float* g_odata, int width, int height, float theta)
{
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < width*height)
{
g_odata[id] = tex1D(tex, xid * 2 + 0.5f);
}
}
#endif // #ifndef _SIMPLETEXTURE_KERNEL_H_
As with textures in OpenGL, you can think of a 2D texture as a rectangular field. The center point of each small rectangle holds your array data. So, with normalized coordinates, tex2D(tex, 0.5f/width, 0.5f/height) will be exactly the first value of your array data (width and height being the width and height of the 2D array).
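To make the coordinate convention concrete, here is a small sketch (not part of the original answer) using the legacy texture-reference API, as in the question. With unnormalized coordinates (tex.normalized = false), texel (i, j) is centred at (i + 0.5f, j + 0.5f); with normalized coordinates the centre is ((i + 0.5f)/width, (j + 0.5f)/height). The texture reference and kernel names below are hypothetical:
texture<float, 2> tex2dRef;   // hypothetical 2D texture reference, bound to a cudaArray elsewhere
__global__ void readTexel(float *out, int i, int j)
{
    // unnormalized lookup of the element stored at row j, column i of the bound array
    out[0] = tex2D(tex2dRef, i + 0.5f, j + 0.5f);
}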
I have two vectors, and after creating a tuple (with zip_iterator) I would like to sort them with sort_by_key and then apply reduce_by_key.
But the reduction by key doesn't work well, since it creates an incorrect counter vector. Can someone help me? Here is my relevant code snippet.
...
typedef thrust::device_vector<int>::iterator IntIterator;
typedef thrust::tuple<IntIterator, IntIterator> IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
typedef thrust::tuple<int, int> tupla;
...
thrust::device_vector <int> documenti(n);
thrust::device_vector <int> strip(n);
...
ZipIterator bufferBegin (thrust::make_tuple(documenti.begin(),strip.begin()));
ZipIterator bufferEnd (thrust::make_tuple(documenti.end(),strip.end()));
...
thrust::sort_by_key(bufferBegin,bufferEnd, counter.begin());
thrust::device_vector <tupla> example(n);
thrust::reduce_by_key(bufferBegin,bufferEnd, counter.begin(), example.begin(), counter.begin());
thrust::sort_by_key(counter.begin(), counter.begin()+n, example.begin(),thrust::greater <int>());
I'm providing an answer to this question just to remove it from the unanswered list.
Your question is not very clear to me. From the code snippet you posted, my understanding is that you are interested in a reduction by key with tuple keys.
Below you can find a fully worked example. I hope it will be helpful to future users.
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
// --- Defining key tuple type
typedef thrust::tuple<int,int> Tuple;
typedef thrust::host_vector<Tuple>::iterator dIter1;
typedef thrust::host_vector<float>::iterator dIter2;
/************************************/
/* EQUALITY OPERATOR BETWEEN TUPLES */
/************************************/
struct BinaryPredicate
{
__host__ __device__ bool operator ()
(const Tuple& lhs, const Tuple& rhs)
{
return (thrust::get<0>(lhs) == thrust::get<0>(rhs)) && (thrust::get<1>(lhs) == thrust::get<1>(rhs));
}
};
/********/
/* MAIN */
/********/
int main()
{
const int N = 7;
thrust::host_vector<Tuple> keys_input(N);
thrust::host_vector<float> values_input(N);
int keys1_input[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys 1
int keys2_input[N] = {1, 5, 3, 8, 2, 2, 1}; // input keys 2
float input_values[N] = {9., 8., 7., 6., 5., 4., 3.}; // input values
for (int i=0; i<N; i++) {
keys_input[i] = thrust::make_tuple(keys1_input[i], keys2_input[i]);
values_input[i] = input_values[i];
}
for (int i=0; i<N; i++) printf("%i %i\n", thrust::get<0>(keys_input[i]), thrust::get<1>(keys_input[i]));
thrust::host_vector<Tuple> keys_output(N);
thrust::host_vector<float> values_output(N);
thrust::pair<dIter1, dIter2> new_end;
new_end = thrust::reduce_by_key(keys_input.begin(),
keys_input.end(),
values_input.begin(),
keys_output.begin(),
values_output.begin(),
BinaryPredicate(),
thrust::plus<float>());
int Nkeys = new_end.first - keys_output.begin();
printf("\n\n");
for (int i = 0; i < Nkeys; i++) printf("%i; %f\n", i, values_output[i]);
printf("\n\n");
for (int i = 0; i < Nkeys; i++) printf("%i %i\n", thrust::get<0>(keys_output[i]), thrust::get<1>(keys_output[i]));
return 0;
}
EDIT
The worked example above refers to host_vectors. Below is a fully worked example for the case when the key and value vectors are regular cudaMalloc'ed arrays.
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include "Utilities.cuh"
// --- Defining key tuple type
typedef thrust::tuple<int, int> Tuple;
typedef thrust::device_vector<Tuple>::iterator dIter1;
typedef thrust::device_vector<float>::iterator dIter2;
/************************************/
/* EQUALITY OPERATOR BETWEEN TUPLES */
/************************************/
struct BinaryPredicate
{
__host__ __device__ bool operator ()
(const Tuple& lhs, const Tuple& rhs)
{
return (thrust::get<0>(lhs) == thrust::get<0>(rhs)) && (thrust::get<1>(lhs) == thrust::get<1>(rhs));
}
};
/********/
/* MAIN */
/********/
int main()
{
const int N = 7;
// --- Keys and input values on the host: allocation and definition
int h_keys1_input[N] = { 1, 3, 3, 3, 2, 2, 1 }; // --- Input keys 1 - host side
int h_keys2_input[N] = { 1, 5, 3, 8, 2, 2, 1 }; // --- Input keys 2 - host side
float h_input_values[N] = { 9., 8., 7., 6., 5., 4., 3. }; // --- Input values - host side
// --- Keys and input values on the device: allocation
int *d_keys1_input; gpuErrchk(cudaMalloc(&d_keys1_input, N * sizeof(int))); // --- Input keys 1 - device side
int *d_keys2_input; gpuErrchk(cudaMalloc(&d_keys2_input, N * sizeof(int))); // --- Input keys 2 - device side
float *d_input_values; gpuErrchk(cudaMalloc(&d_input_values, N * sizeof(float))); // --- Input values - device side
// --- Keys and input values: host -> device
gpuErrchk(cudaMemcpy(d_keys1_input, h_keys1_input, N * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_keys2_input, h_keys2_input, N * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_input_values, h_input_values, N * sizeof(float), cudaMemcpyHostToDevice));
// --- From raw pointers to device_ptr
thrust::device_ptr<int> dev_ptr_keys1 = thrust::device_pointer_cast(d_keys1_input);
thrust::device_ptr<int> dev_ptr_keys2 = thrust::device_pointer_cast(d_keys2_input);
thrust::device_ptr<float> dev_ptr_values = thrust::device_pointer_cast(d_input_values);
// --- Declare outputs
thrust::device_vector<Tuple> d_keys_output(N);
thrust::device_vector<float> d_values_output(N);
thrust::pair<dIter1, dIter2> new_end;
auto begin = thrust::make_zip_iterator(thrust::make_tuple(dev_ptr_keys1, dev_ptr_keys2));
auto end = thrust::make_zip_iterator(thrust::make_tuple(dev_ptr_keys1 + N, dev_ptr_keys2 + N));
new_end = thrust::reduce_by_key(begin,
end,
dev_ptr_values,
d_keys_output.begin(),
d_values_output.begin(),
BinaryPredicate(),
thrust::plus<float>());
int Nkeys = new_end.first - d_keys_output.begin();
printf("\n\n");
for (int i = 0; i < Nkeys; i++) {
float output = d_values_output[i];
printf("%i; %f\n", i, output);
}
thrust::host_vector<Tuple> h_keys_output(d_keys_output);
printf("\n\n");
for (int i = 0; i < Nkeys; i++) {
int key1 = thrust::get<0>(h_keys_output[i]);
int key2 = thrust::get<1>(h_keys_output[i]);
printf("%i %i\n", key1, key2);
}
return 0;
}