I'm trying to figure out how to work with an index array in CUDA Thrust. My problem is the following:
vector<int> index(20);
vector<float> data1(100), data2(100), result(20);
for (int i = 0; i < index.size(); ++i)
    result[i] = do_something(data1[index[i]], data2[index[i]]);
The function do_something() takes elements from several large arrays, selected by the index array. The size of index is usually much smaller than the size of the data arrays, and the elements of index are sorted.
I can't figure out the best strategy for doing this efficiently in Thrust.
I recommend reading the thrust quick start guide for a basic understanding of thrust and some of the concepts I will use below.
Most of what you have in your example should map directly to straightforward operations in thrust. Your for-loop and do_something operation would be replaced by a thrust algorithm, probably with an appropriate functor definition that mimics the functionality you have in do_something. An easy-to-use thrust algorithm that may be applicable to this case is thrust::transform().
The use of the index array for indirection is often handled with a thrust permutation_iterator, which is designed for exactly this purpose.
Combining these concepts, and supposing for a simple example that your do_something operation sums the squares of the indexed elements from the two input vectors and stores the square root of that result in the result vector, we could have an example like this:
$ cat t74.cu
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/copy.h>
#include <math.h>
#include <iostream>
#include <iterator>

struct do_something
{
  template <typename T>
  __host__ __device__ T operator()(const T &i1, const T &i2){
    // sum the squares of the two inputs, return the square root
    return sqrtf(i1*i1 + i2*i2);
  }
};

int main(){
  // pythagorean triples
  float d1[] = { 3,  5,  8,  7, 20, 12,  9, 28};
  float d2[] = { 4, 12, 15, 24, 21, 35, 40, 45};
  int   i1[] = { 1,  3,  5,  7};
  const size_t isize = sizeof(i1)/sizeof(i1[0]);
  const size_t dsize = sizeof(d1)/sizeof(d1[0]);
  thrust::device_vector<int>   index(i1, i1+isize);
  thrust::device_vector<float> data1(d1, d1+dsize);
  thrust::device_vector<float> data2(d2, d2+dsize);
  thrust::device_vector<float> result(isize);
  thrust::transform(thrust::make_permutation_iterator(data1.begin(), index.begin()),
                    thrust::make_permutation_iterator(data1.begin(), index.end()),
                    thrust::make_permutation_iterator(data2.begin(), index.begin()),
                    result.begin(),
                    do_something());
  thrust::copy_n(result.begin(), result.size(), std::ostream_iterator<float>(std::cout, ","));
  std::cout << std::endl;
}
$ nvcc -arch=sm_30 -o t74 t74.cu
$ ./t74
13,25,37,53,
$
The index set need not be sorted for this usage (although it may be somewhat beneficial from a performance perspective).
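As an aside, if the gathered values are going to be reused, it may be worthwhile to materialize the selection explicitly rather than recompute it through the iterator; thrust::gather performs the same indexed copy that the permutation_iterator expresses lazily. A minimal sketch along the lines of the data above:
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/copy.h>
#include <iostream>
#include <iterator>

int main(){
  float d1[] = { 3, 5, 8, 7, 20, 12, 9, 28};
  int   i1[] = { 1, 3, 5, 7};
  thrust::device_vector<float> data1(d1, d1+8);
  thrust::device_vector<int>   index(i1, i1+4);
  thrust::device_vector<float> gathered(4);
  // gathered[i] = data1[index[i]]
  thrust::gather(index.begin(), index.end(), data1.begin(), gathered.begin());
  thrust::copy(gathered.begin(), gathered.end(), std::ostream_iterator<float>(std::cout, ","));
  std::cout << std::endl; // prints: 5,7,12,28,
}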
I would like to conditionally copy data from a vector, based on a stencil vector which is N times shorter. Every element of the stencil would be responsible for N elements of the data vector.
Suppose that the vectors look as follows (N=3)
data = {1,2,3,4,5,6,7,8,9}
stencil = {1,0,1}
What I would like to get in result:
result = {1,2,3,7,8,9}
Is there a way to achieve this using functions from Thrust library?
I know that there is:
thrust::copy_if (InputIterator1 first, InputIterator1 last, InputIterator2 stencil, OutputIterator result, Predicate pred)
but this doesn't allow me to copy N values from the data vector based on one element of the stencil.
As is often the case, I imagine there are many possible ways to do this.
The approach that occurs to me (using copy_if) is to supply the stencil through a thrust::permutation_iterator, which takes the stencil vector and generates the index into it using a thrust::transform_iterator. If we imagine a copying index that goes from 0..8 for this example, then we can index into the "source" (i.e. stencil) vector using a "map" index computed from a thrust::counting_iterator with integer division by N (using thrust placeholders), so indices 0..8 map to stencil positions 0,0,0,1,1,1,2,2,2. The copying predicate just tests whether the stencil value == 1.
The thrust quick start guide gives a concise description of how to use these fancy iterators.
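To make the map concrete, here is a minimal side sketch (separate from the answer code below) showing what that transform_iterator produces for N=3:
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/functional.h>  // placeholders
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <iterator>

using namespace thrust::placeholders;

int main(){
  const int N = 3;
  // the copying index 0..8, divided by N, selects the stencil position for each data element
  thrust::copy_n(thrust::host,
                 thrust::make_transform_iterator(thrust::counting_iterator<int>(0), _1 / N),
                 9,
                 std::ostream_iterator<int>(std::cout, ","));
  std::cout << std::endl; // prints: 0,0,0,1,1,1,2,2,2,
}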
Here is a worked example:
$ cat t471.cu
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <iostream>
#include <iterator>

using namespace thrust::placeholders;

int main(){
  int data[]    = {1,2,3,4,5,6,7,8,9};
  int stencil[] = {1,0,1};
  int ds = sizeof(data)/sizeof(data[0]);
  int ss = sizeof(stencil)/sizeof(stencil[0]);
  int N  = ds/ss; // assume ds is evenly divisible by ss
  thrust::device_vector<int> d_data(data, data+ds);
  thrust::device_vector<int> d_stencil(stencil, stencil+ss);
  thrust::device_vector<int> d_result(ds);
  // stretch the stencil by N: stencil element i/N governs data element i
  int rs = thrust::copy_if(d_data.begin(), d_data.end(),
                           thrust::make_permutation_iterator(d_stencil.begin(),
                             thrust::make_transform_iterator(thrust::counting_iterator<int>(0), _1 / N)),
                           d_result.begin(),
                           _1 == 1) - d_result.begin();
  thrust::copy_n(d_result.begin(), rs, std::ostream_iterator<int>(std::cout, ","));
  std::cout << std::endl;
  return 0;
}
$ nvcc -o t471 t471.cu
$ ./t471
1,2,3,7,8,9,
$
With the assumptions about stencil organization made here, we could also pre-compute the result size rs with thrust::reduce, and use that to allocate the result vector:
$ cat t471.cu
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <iostream>
#include <iterator>

using namespace thrust::placeholders;

int main(){
  int data[]    = {1,2,3,4,5,6,7,8,9};
  int stencil[] = {1,0,1};
  int ds = sizeof(data)/sizeof(data[0]);
  int ss = sizeof(stencil)/sizeof(stencil[0]);
  int N  = ds/ss; // assume ds is evenly divisible by ss
  thrust::device_vector<int> d_data(data, data+ds);
  thrust::device_vector<int> d_stencil(stencil, stencil+ss);
  // each 1 in the stencil contributes N elements to the result
  int rs = thrust::reduce(d_stencil.begin(), d_stencil.end())*N;
  thrust::device_vector<int> d_result(rs);
  thrust::copy_if(d_data.begin(), d_data.end(),
                  thrust::make_permutation_iterator(d_stencil.begin(),
                    thrust::make_transform_iterator(thrust::counting_iterator<int>(0), _1 / N)),
                  d_result.begin(),
                  _1 == 1);
  thrust::copy_n(d_result.begin(), rs, std::ostream_iterator<int>(std::cout, ","));
  std::cout << std::endl;
  return 0;
}
$ nvcc -o t471 t471.cu
$ ./t471
1,2,3,7,8,9,
$
I run into this issue over and over in CUDA. I have, for a set of elements, done some GPU calculation. This results in some value that has linear meaning (for instance, in terms of memory):
element_sizes = [ 10, 100, 23, 45 ]
And now, for the next stage of GPU calculation, I need the following values:
memory_size = sum(element_sizes)
memory_offsets = [ 0, 10, 110, 133 ]
I can calculate memory_size at 80 GB/s on my GPU using the reduction code available from NVIDIA. However, I can't use this code as-is, because it uses a branching technique that does not also compose the memory offsets array. I have tried many things, but what I have found is that simply copying element_sizes over to the host and calculating the offsets with a simple for loop is the simplest, fastest way to go:
// in pseudocode
host_element_sizes = copy_to_host(element_sizes);
host_offsets = (... *) malloc(...);
int total_size = 0;
for (int i = 0; i < ...; ...) {
    host_offsets[i] = total_size;
    total_size += host_element_sizes[i];
}
device_offsets = (... *) device_malloc(...);
device_offsets = copy_to_device(host_offsets, ...);
However, I have done this many times now, and it is starting to become a bottleneck. This seems like a typical problem, but I have found no work-around.
What is the expected way for a CUDA programmer to solve this problem?
I think the algorithm you are looking for is a prefix sum. A prefix sum on a vector produces another vector which contains the cumulative sum values of the input vector. A prefix sum exists in at least two variants: an exclusive scan or an inclusive scan. Conceptually these are similar; for your input [10, 100, 23, 45], the exclusive scan produces [0, 10, 110, 133] (exactly your memory_offsets), while the inclusive scan produces [10, 110, 133, 178].
If your element_sizes vector has been deposited in GPU global memory (which appears to be the case based on your pseudocode), then there are library functions that run on the GPU which you can call at that point to produce the memory_offsets vector, and the memory_size value can be trivially obtained from the last value in the vector, with a slight variation depending on whether you are doing an inclusive scan or an exclusive scan.
Here's a trivial worked example using thrust:
$ cat t319.cu
#include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <iostream>
#include <iterator>

int main(){
  const int element_sizes[] = { 10, 100, 23, 45 };
  const int ds = sizeof(element_sizes)/sizeof(element_sizes[0]);
  thrust::device_vector<int> dv_es(element_sizes, element_sizes+ds);
  thrust::device_vector<int> dv_mo(ds);
  // exclusive scan: dv_mo[i] = sum of dv_es[0..i-1], with dv_mo[0] = 0
  thrust::exclusive_scan(dv_es.begin(), dv_es.end(), dv_mo.begin());
  std::cout << "element_sizes:" << std::endl;
  thrust::copy_n(dv_es.begin(), ds, std::ostream_iterator<int>(std::cout, ","));
  std::cout << std::endl << "memory_offsets:" << std::endl;
  thrust::copy_n(dv_mo.begin(), ds, std::ostream_iterator<int>(std::cout, ","));
  // total size = last offset + last element size
  std::cout << std::endl << "memory_size:" << std::endl << dv_es[ds-1] + dv_mo[ds-1] << std::endl;
}
$ nvcc -o t319 t319.cu
$ ./t319
element_sizes:
10,100,23,45,
memory_offsets:
0,10,110,133,
memory_size:
178
$
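As an aside, the same exclusive scan is available from CUB, which is bundled with recent CUDA toolkits. A minimal sketch using cub::DeviceScan::ExclusiveSum (error checking omitted for brevity):
#include <cub/cub.cuh>
#include <cstdio>

int main(){
  const int num_items = 4;
  const int h_sizes[num_items] = {10, 100, 23, 45};
  int *d_in, *d_out;
  cudaMalloc(&d_in,  num_items*sizeof(int));
  cudaMalloc(&d_out, num_items*sizeof(int));
  cudaMemcpy(d_in, h_sizes, num_items*sizeof(int), cudaMemcpyHostToDevice);
  // CUB two-phase pattern: the first call only computes the required temp storage size
  void  *d_temp = NULL;
  size_t temp_bytes = 0;
  cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
  int h_offsets[num_items];
  cudaMemcpy(h_offsets, d_out, num_items*sizeof(int), cudaMemcpyDeviceToHost);
  printf("%d %d %d %d\n", h_offsets[0], h_offsets[1], h_offsets[2], h_offsets[3]); // 0 10 110 133
  return 0;
}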
I'm working through the examples of the "CUDA by Example" book. The following code doesn't give me an answer and doesn't work as it should. Where's the mistake?
I will appreciate your help and answers.
I get an output which reads:
Calculation done on GPU yields the answer: &d
Press enter to stop
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
__global__ void add_integers_cuda(int a, int b, int *c)
{
*c = a + b;
}
int main(void)
{
int c;
int *dev_ptr;
cudaMalloc((void **)&dev_ptr, sizeof(int)); //allocate sizeof(int) bytes of contiguous memory in the gpu device and return the address of first byte to dev_ptr.
// call the kernel
add_integers_cuda <<<1,1>>>(2,7,dev_ptr);
cudaMemcpy(&c, dev_ptr, sizeof(int), cudaMemcpyDeviceToHost);
printf("Calculation done on GPU yields the answer: &d\n",c );
cudaFree(dev_ptr);
printf("Press enter to stop.");
cin.ignore(255, '\n');
return 0;
}
"
&d is not a correct printf formatting character here:
printf("Calculation done on GPU yields the answer: &d\n",c );
You won't get the output you are expecting.
You should use %d instead:
printf("Calculation done on GPU yields the answer: %d\n",c );
This particular issue has nothing to do with CUDA of course.
You may also want to run CUDA codes with cuda-memcheck and/or use proper CUDA error checking if you are just learning and having trouble. Neither of those would have pointed out the above error, however.
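For reference, proper CUDA error checking usually amounts to a small macro wrapped around API calls and kernel launches; a minimal sketch (one common variant; the macro name here is just illustrative):
#include <stdio.h>
#include <stdlib.h>

// illustrative error-checking macro: place after CUDA API calls and kernel launches
#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                    msg, cudaGetErrorString(__err), __FILE__, __LINE__); \
            exit(1); \
        } \
    } while (0)
In the posted code, cudaCheckErrors("kernel launch failure"); after the kernel call and cudaCheckErrors("cudaMemcpy failure"); after the cudaMemcpy would surface any runtime problems.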
Let us say that I have a single array which stores time stamps for multiple events. For example: T1_e1, T2_e1, ..., T1_e2, T2_e2, T3_e2, ..., T1_eN, T2_eN, ...
I know that Thrust offers a function which computes adjacent differences, but here I need to do it for multiple events. Basically, I am constructing multiple histograms from a single input array.
So the output would have N different histograms (one for each event) like this:
histogram bins for e1, histogram bins for e2, histogram bins for e3,....histogram bins for eN.
Input1 (timestamps): 100, 101, 104, 105, 101,104, 106, 111, 90, 91, 93, 94,95
Input2 (events): 4123,4123,4123,4123,2129,2129,2129,2129,300,300,300,300,300
output: 4123: (1,2),(2,0),(3,1),(4,0),(5,0)
        2129: (1,0),(2,1),(3,1),(4,0),(5,1)
        300:  (1,2),(2,1),(3,0),(4,0),(5,0)
The number of bins will be fixed, i.e. 5 bins per histogram.
Regarding the tuples: (x,y) -> x is the difference between two consecutive time stamps belonging to the same event. y is the count.
If we consider event 4123, the first tuple is (1,2) because the difference between 101 and 100 is 1, and between 105 and 104 is 1. So there are two time stamp differences which fall into this bin, hence (1,2).
Can someone please suggest the most efficient way to do this? So far, it seems that I will have to write my own code. But if there are existing solutions, I would like to try them first.
Here's one possible approach, which computes the sparse histogram (i.e. non-zero bins only). It should not be difficult to convert a sparse histogram to a dense histogram (zero and non-zero bins included) using thrust::scatter; a short sketch of that conversion is given after the worked example below. The steps:
1. Compute the timestamp differences using thrust::adjacent_difference and a special functor (my_adj_diff) that computes adjacent differences only for like events.
2. Use thrust::remove_if to remove the zero values (one per event) at the start of each event sequence (created by the functor in step 1).
3. Combine events and timestamp differences into a single integer for histogramming using thrust::transform. This just multiplies the event by 100 in my example, and adds the timestamp difference (assuming a set of bins whose max bin is less than 100).
4. Use the sparse histogram method from the thrust histogram example code.
Here's a fully worked example. The data does not quite match your expected output (non-zero values only) because you have an error in your expected output: for event 300 the differences are 1,2,1,1, so bin 1 should have a count of 3, not 2.
$ cat t678.cu
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/inner_product.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <iostream>
#include <iterator>

#define ZIP(X,Y) thrust::make_zip_iterator(thrust::make_tuple(X,Y))
#define SCALE 100

// compute adjacent differences, but only between like events;
// mark the first element of each event sequence with a zero
struct my_adj_diff
{
  template <typename T>
  __host__ __device__
  T operator()(T &d2, T &d1) const
  {
    if (thrust::get<1>(d1) == thrust::get<1>(d2)) {
      thrust::get<0>(d2) -= thrust::get<0>(d1);}
    else {
      thrust::get<0>(d2) = 0;}
    return d2;
  }
};

// test whether the timestamp member of a tuple is zero
struct my_is_zero
{
  template <typename T>
  __host__ __device__
  bool operator()(const T &d1) const
  {
    return (thrust::get<0>(d1) == 0);
  }
};

// pack an event and a timestamp difference into a single integer
struct my_combine
{
  template <typename T>
  __host__ __device__
  T operator()(const T &d1, const T &d2) const
  {
    return (d1*SCALE)+d2;
  }
};

// sparse histogram using reduce_by_key
// modified from: https://github.com/thrust/thrust/blob/master/examples/histogram.cu
template <typename Vector1,
          typename Vector2,
          typename Vector3>
void sparse_histogram(const Vector1& input,
                      Vector2& histogram_values,
                      Vector3& histogram_counts)
{
  typedef typename Vector1::value_type ValueType; // input value type
  typedef typename Vector3::value_type IndexType; // histogram index type
  thrust::device_vector<ValueType> data(input);
  // sort data to bring equal elements together
  thrust::sort(data.begin(), data.end());
  // number of histogram bins is equal to number of unique values (assumes data.size() > 0)
  IndexType num_bins = thrust::inner_product(data.begin(), data.end() - 1,
                                             data.begin() + 1,
                                             IndexType(1),
                                             thrust::plus<IndexType>(),
                                             thrust::not_equal_to<ValueType>());
  // resize histogram storage
  histogram_values.resize(num_bins);
  histogram_counts.resize(num_bins);
  // compact find the end of each bin of values
  thrust::reduce_by_key(data.begin(), data.end(),
                        thrust::constant_iterator<IndexType>(1),
                        histogram_values.begin(),
                        histogram_counts.begin());
}

int main(){
  int tstamps[] = {  100, 101, 104, 105, 101, 104, 106, 111, 90, 91, 93, 94, 95};
  int mevents[] = { 4123,4123,4123,4123,2129,2129,2129,2129,300,300,300,300,300};
  int dsize = sizeof(tstamps)/sizeof(int);
  thrust::host_vector<int> h_stamps(tstamps, tstamps+dsize);
  thrust::host_vector<int> h_events(mevents, mevents+dsize);
  thrust::device_vector<int> d_stamps = h_stamps;
  thrust::device_vector<int> d_events = h_events;
  // compute timestamp differences by event, in place
  thrust::adjacent_difference(ZIP(d_stamps.begin(), d_events.begin()),
                              ZIP(d_stamps.end(),   d_events.end()),
                              ZIP(d_stamps.begin(), d_events.begin()),
                              my_adj_diff());
  d_stamps[0] = 0; // fix up first event for adjacent_difference
  // remove the zero markers at the start of each event sequence
  int sz1 = thrust::remove_if(ZIP(d_stamps.begin(), d_events.begin()),
                              ZIP(d_stamps.end(),   d_events.end()),
                              my_is_zero())
            - ZIP(d_stamps.begin(), d_events.begin());
  d_stamps.resize(sz1);
  d_events.resize(sz1);
  // pack events and timestamps into a single vector - assumes max bin (time difference) is less than SCALE
  thrust::device_vector<int> d_data(sz1);
  thrust::transform(d_events.begin(), d_events.end(), d_stamps.begin(), d_data.begin(), my_combine());
  // compute histogram
  thrust::device_vector<int> histogram_values;
  thrust::device_vector<int> histogram_counts;
  sparse_histogram(d_data, histogram_values, histogram_counts);
  thrust::copy(histogram_values.begin(), histogram_values.end(), std::ostream_iterator<int>(std::cout, ","));
  std::cout << std::endl;
  thrust::copy(histogram_counts.begin(), histogram_counts.end(), std::ostream_iterator<int>(std::cout, ","));
  std::cout << std::endl;
}
$ nvcc t678.cu -o t678
$ ./t678
30001,30002,212902,212903,212905,412301,412303,
3,1,1,1,1,2,1,
$
Note that you will need to use CUDA 7.0 (not CUDA 7.0 RC or any earlier version of CUDA) or else download the latest thrust master branch from github, because older versions of thrust have an issue when attempting to use zip iterators with thrust::adjacent_difference.
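Regarding the sparse-to-dense conversion mentioned at the top of this answer, here is a minimal sketch using thrust::scatter, with small stand-in data (it assumes the sparse values have already been converted to zero-based bin indices, which the code above does not do):
#include <thrust/device_vector.h>
#include <thrust/scatter.h>
#include <thrust/copy.h>
#include <iostream>
#include <iterator>

int main(){
  // stand-in sparse histogram: bins 1, 2 and 4 are occupied (5 bins total)
  int vals[]   = {1, 2, 4};
  int counts[] = {2, 1, 1};
  thrust::device_vector<int> histogram_values(vals, vals+3);
  thrust::device_vector<int> histogram_counts(counts, counts+3);
  thrust::device_vector<int> dense(5, 0); // all bins, zero-initialized
  // dense[histogram_values[i]] = histogram_counts[i]
  thrust::scatter(histogram_counts.begin(), histogram_counts.end(),
                  histogram_values.begin(), dense.begin());
  thrust::copy(dense.begin(), dense.end(), std::ostream_iterator<int>(std::cout, ","));
  std::cout << std::endl; // prints: 0,2,1,0,1,
}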
I have been trying to use the Thrust function reduce_by_key on device vectors. In the documentation the examples are given on host_vectors instead of device vectors. The main problem I am having is in storing the return value of the function. To be more specific, here is my code:
thrust::device_vector<int> hashValueVector(10);
thrust::device_vector<int> hashKeysVector(10);
thrust::device_vector<int> d_pathId(10);
thrust::device_vector<int> d_freqError(10); //EDITED
thrust::pair<thrust::detail::normal_iterator<thrust::device_ptr<int> >, thrust::detail::normal_iterator<thrust::device_ptr<int> > > new_end; //THE PROBLEM
new_end = thrust::reduce_by_key(hashKeysVector.begin(), hashKeysVector.end(), hashValueVector.begin(), d_pathId.begin(), d_freqError.begin());
I first tried declaring them as device_ptr, since the documentation uses pointers in the definition for host_vectors too, but I got a compilation error when I tried that. I then read the error statement and converted the declaration to the above. This compiles fine, but I am not sure whether it is the right way to declare it or not.
Is there any other standard/clean way of declaring that (the "new_end" variable)? Please comment if my question is not clear somewhere.
EDIT: I have edited the declaration of d_freqError. It was supposed to be int; I wrote it as hashElem by mistake, sorry for that.
There is a problem in the setup of your reduce_by_key operation:
new_end = thrust::reduce_by_key(hashKeysVector.begin(),hashKeysVector.end(),hashValueVector.begin(),d_pathId.begin(),d_freqError.begin());
Notice that in the documentation it states:
OutputIterator2 is a model of Output Iterator and InputIterator2's value_type is convertible to OutputIterator2's value_type.
The value type of your InputIterator2 (i.e. hashValueVector.begin()) is int. The value type of your OutputIterator2 (d_freqError.begin(), as originally posted) was struct hashElem. Thrust is not going to know how to convert an int to a struct hashElem.
Regarding your question, it should not be difficult to capture the return entity from reduce_by_key. According to the documentation it is a thrust::pair of two iterators, and these iterators should be consistent with (i.e. of the same vector type and value type as) your keys iterator type and your values iterator type, respectively.
Here's an updated sample based on what you posted, which compiles cleanly:
$ cat t353.cu
#include <iostream>
#include <iterator>
#include <thrust/device_vector.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/copy.h>

typedef thrust::device_vector<int>::iterator dIter;

int main(){
  thrust::device_vector<int> hashValueVector(10);
  thrust::device_vector<int> hashKeysVector(10);
  thrust::device_vector<int> d_pathId(10);
  thrust::device_vector<int> d_freqError(10);
  // values 0..9; keys 1,1,1,1,1,0,2,2,2,2 (element 5 is left at its default of 0)
  thrust::sequence(hashValueVector.begin(), hashValueVector.end());
  thrust::fill(hashKeysVector.begin(), hashKeysVector.begin()+5, 1);
  thrust::fill(hashKeysVector.begin()+6, hashKeysVector.begin()+10, 2);
  thrust::pair<dIter, dIter> new_end;
  new_end = thrust::reduce_by_key(hashKeysVector.begin(), hashKeysVector.end(),
                                  hashValueVector.begin(),
                                  d_pathId.begin(), d_freqError.begin());
  std::cout << "Number of results are: " << new_end.first - d_pathId.begin() << std::endl;
  thrust::copy(d_pathId.begin(), new_end.first, std::ostream_iterator<int>(std::cout, "\n"));
  thrust::copy(d_freqError.begin(), new_end.second, std::ostream_iterator<int>(std::cout, "\n"));
}
$ nvcc -arch=sm_20 -o t353 t353.cu
$ ./t353
Number of results are: 3
1
0
2
10
5
30
$
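As an aside, if C++11 compilation is acceptable (e.g. nvcc with -std=c++11), the typedef and the explicit pair type can be dispensed with entirely by letting the compiler deduce the type. Reusing the vectors from the example above:
// C++11 type deduction removes the need to spell out the pair type
auto new_end = thrust::reduce_by_key(hashKeysVector.begin(), hashKeysVector.end(),
                                     hashValueVector.begin(),
                                     d_pathId.begin(), d_freqError.begin());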