Modifying zip iterator with Eigen::Matrix gives erroneous results - CUDA
I have three sets of points X,Y,Z. I intend to apply a transform using Eigen::Matrix4f. I use a zip iterator and a transform operator to do it. The program compiles, however the result is only partially correct. This post is inspired by How to modify the contents of a zip iterator
The transformation of
A= [0 1 2;3 4 5;6 7 8; 1 1 1] with M=[1 2 3 4;5 6 7 8;9 10 11 12;13 14 15 16] using M*A should result to: R=[28 34 40; 68 86 104; 108 138 168]
However it gives: R=[28 34 40; 208 251 294; 2410 2905 3400].
The X values are being correctly modified. However the Y & Z values are faulty.
My code and cmakelists is as below:
#include <thrust/iterator/zip_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <Eigen/Dense>
#include <iostream>
// Iterator over a device vector of floats (one coordinate stream).
using FloatIterator = thrust::device_vector<float>::iterator;
// A triple of float iterators, one per coordinate axis.
using FloatIteratorTuple = thrust::tuple<FloatIterator, FloatIterator, FloatIterator>;
// Zip iterator that presents the three coordinate streams as one (x, y, z) stream.
using Float3Iterator = thrust::zip_iterator<FloatIteratorTuple>;
// A single (x, y, z) point as a value tuple.
using Float3 = thrust::tuple<float, float, float>;
struct modify_tuple
{
Eigen::Matrix4f _Mat4f;
modify_tuple(Eigen::Matrix4f Mat4f) : _Mat4f(Mat4f) { }
__host__ __device__ Float3 operator()(Float3 a) const
{
Eigen::Vector4f V(thrust::get<0>(a), thrust::get<1>(a), thrust::get<2>(a), 1.0);
V=_Mat4f*V;
Float3 res=thrust::make_tuple( V(0,0), V(1,0), V(2,0) );
return res;
}
};
int main(void)
{
// Coordinates stored as three separate device vectors (structure-of-arrays).
thrust::device_vector<float> X(3);
thrust::device_vector<float> Y(3);
thrust::device_vector<float> Z(3);
// Element-wise initialization; each assignment is a host-to-device transfer.
X[0]=0, X[1]=1, X[2]=2;
Y[0]=4, Y[1]=5, Y[2]=6;
Z[0]=7, Z[1]=8, Z[2]=9;
std::cout << "X,Y,Z before transformation="<< std::endl;
thrust::copy_n(X.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Y.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Z.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
// Zip the three vectors so each dereference yields one (x,y,z) tuple.
// (make_tuple is found unqualified via argument-dependent lookup.)
Float3Iterator P_first = thrust::make_zip_iterator(make_tuple(X.begin(), Y.begin(), Z.begin()));
Float3Iterator P_last = thrust::make_zip_iterator(make_tuple(X.end(), Y.end(), Z.end()));
// 4x4 transform matrix, filled entry by entry as M(row, col).
Eigen::Matrix4f M;
M(0,0)= 1; M(0,1)= 2; M(0,2)= 3; M(0,3)= 4;
M(1,0)= 5; M(1,1)= 6; M(1,2)= 7; M(1,3)= 8;
M(2,0)= 9; M(2,1)= 10; M(2,2)= 11; M(2,3)= 12;
M(3,0)= 13; M(3,1)= 14; M(3,2)= 15; M(3,3)= 16;
// Apply M to every point in place (output iterator == input iterator).
thrust::transform(thrust::device, P_first,P_last, P_first, modify_tuple(M));
std::cout << "X, Y, Z after transformation="<< std::endl;
thrust::copy_n(X.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Y.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Z.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
return 0;
}
CMakeLists.txt
# Pre-CMake-3.x CUDA setup: FindCUDA module drives nvcc directly.
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
FIND_PACKAGE(CUDA REQUIRED)
# Make the CUDA and Eigen headers visible to the compiler.
INCLUDE_DIRECTORIES(${CUDA_INCLUDE_DIRS})
INCLUDE_DIRECTORIES (/usr/include/eigen3)
# Extra nvcc flags: optimize and emit code for compute capability 5.2 (Maxwell).
set(
CUDA_NVCC_FLAGS
${CUDA_NVCC_FLAGS};
-O3 -gencode arch=compute_52,code=sm_52;
)
# Compile the .cu file with nvcc and link against the CUDA runtime libraries.
CUDA_ADD_EXECUTABLE(modify_zip_iterator_stackoverflow_ver2 modify_zip_iterator_stackoverflow_ver2.cu)
TARGET_LINK_LIBRARIES(modify_zip_iterator_stackoverflow_ver2 ${CUDA_LIBRARIES})
Probably you just need to get the latest Eigen.
I used CUDA 9.2 on Fedora27, and grabbed the latest eigen from here.
Then I compiled and ran your code as follows:
$ cat t21.cu
#include <thrust/iterator/zip_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <Eigen/Dense>
#include <iostream>
// Iterator over a device vector of floats (one coordinate stream).
typedef thrust::device_vector<float>::iterator FloatIterator;
// A triple of float iterators, one per coordinate axis.
typedef thrust::tuple<FloatIterator, FloatIterator, FloatIterator> FloatIteratorTuple;
// Zip iterator that presents the three streams as one (x, y, z) stream.
typedef thrust::zip_iterator<FloatIteratorTuple> Float3Iterator;
// A single (x, y, z) point as a value tuple.
typedef thrust::tuple<float,float,float> Float3;
// Functor for thrust::transform: applies the stored 4x4 homogeneous transform
// to one (x, y, z) tuple and returns the transformed tuple.
struct modify_tuple
{
// Transform matrix, copied into the functor so it is available in device code.
Eigen::Matrix4f _Mat4f;
modify_tuple(Eigen::Matrix4f Mat4f) : _Mat4f(Mat4f) { }
// Promote the point to homogeneous coordinates (w = 1), multiply by the
// matrix, and return the first three components (the resulting w is dropped).
__host__ __device__ Float3 operator()(Float3 a) const
{
Eigen::Vector4f V(thrust::get<0>(a), thrust::get<1>(a), thrust::get<2>(a), 1.0);
V=_Mat4f*V;
Float3 res=thrust::make_tuple( V(0,0), V(1,0), V(2,0) );
return res;
}
};
int main(void)
{
// Coordinates stored as three separate device vectors (structure-of-arrays).
thrust::device_vector<float> X(3);
thrust::device_vector<float> Y(3);
thrust::device_vector<float> Z(3);
// Element-wise initialization; each assignment is a host-to-device transfer.
X[0]=0, X[1]=1, X[2]=2;
Y[0]=4, Y[1]=5, Y[2]=6;
Z[0]=7, Z[1]=8, Z[2]=9;
std::cout << "X,Y,Z before transformation="<< std::endl;
thrust::copy_n(X.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Y.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Z.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
// Zip the three vectors so each dereference yields one (x,y,z) tuple.
// (make_tuple is found unqualified via argument-dependent lookup.)
Float3Iterator P_first = thrust::make_zip_iterator(make_tuple(X.begin(), Y.begin(), Z.begin()));
Float3Iterator P_last = thrust::make_zip_iterator(make_tuple(X.end(), Y.end(), Z.end()));
// 4x4 transform matrix, filled entry by entry as M(row, col).
Eigen::Matrix4f M;
M(0,0)= 1; M(0,1)= 2; M(0,2)= 3; M(0,3)= 4;
M(1,0)= 5; M(1,1)= 6; M(1,2)= 7; M(1,3)= 8;
M(2,0)= 9; M(2,1)= 10; M(2,2)= 11; M(2,3)= 12;
M(3,0)= 13; M(3,1)= 14; M(3,2)= 15; M(3,3)= 16;
// Apply M to every point in place (output iterator == input iterator).
thrust::transform(thrust::device, P_first,P_last, P_first, modify_tuple(M));
std::cout << "X, Y, Z after transformation="<< std::endl;
thrust::copy_n(X.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Y.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Z.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
return 0;
}
$ nvcc -std=c++11 -I/path/to/eigen/eigen-eigen-71546f1a9f0c t21.cu -o t21 --expt-relaxed-constexpr
$ ./t21
X,Y,Z before transformation=
0,1,2,
4,5,6,
7,8,9,
X, Y, Z after transformation=
33,39,45,
81,99,117,
129,159,189,
$
The output doesn't match what you are expecting in your question, but what you are expecting is not correct either.
The first tuple provided to your functor as a result of dereferencing your zip iterator will be (X[0],Y[0],Z[0]), which is (0,4,7). Your functor then converts that to (0,4,7,1) and does a matrix-vector multiplication with your M matrix. The first row inner product is given by 0*1+4*2+7*3+1*4, which sum is 33. The second row inner product is given by 0*5+4*6+7*7+1*8, which sum is 81. The third row inner product is given by 0*9+4*10+7*11+1*12, which sum is 129. You can see this sequence 33,81,129 is exactly the first column of the output above.
The second tuple provided to your functor as a result of dereferencing your zip iterator will be (X[1],Y[1],Z[1]), which is (1,5,8). Your functor then converts that to (1,5,8,1) and does a matrix-vector multiplication with your M matrix. The first row inner product is given by 1*1+5*2+8*3+1*4, which sum is 39. The second row inner product is given by 1*5+5*6+8*7+1*8, which sum is 99. The third row inner product is given by 1*9+5*10+8*11+1*12, which sum is 159. You can see this sequence 39,99,159 is exactly the second column of the output above.
I haven't done the corresponding arithmetic for the 3rd column of the output, but I don't think it is wrong.
Here's a modification of your code, demonstrating the correctness of the results, doing the arithmetic in Eigen host code:
$ cat t21.cu
#include <thrust/iterator/zip_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <Eigen/Dense>
#include <iostream>
// Iterator over a device vector of floats (one coordinate stream).
typedef thrust::device_vector<float>::iterator FloatIterator;
// A triple of float iterators, one per coordinate axis.
typedef thrust::tuple<FloatIterator, FloatIterator, FloatIterator> FloatIteratorTuple;
// Zip iterator that presents the three streams as one (x, y, z) stream.
typedef thrust::zip_iterator<FloatIteratorTuple> Float3Iterator;
// A single (x, y, z) point as a value tuple.
typedef thrust::tuple<float,float,float> Float3;
// Functor for thrust::transform: applies the stored 4x4 homogeneous transform
// to one (x, y, z) tuple and returns the transformed tuple.
struct modify_tuple
{
// Transform matrix, copied into the functor so it is available in device code.
Eigen::Matrix4f _Mat4f;
modify_tuple(Eigen::Matrix4f Mat4f) : _Mat4f(Mat4f) { }
// Promote the point to homogeneous coordinates (w = 1), multiply by the
// matrix, and return the first three components (the resulting w is dropped).
__host__ __device__ Float3 operator()(Float3 a) const
{
Eigen::Vector4f V(thrust::get<0>(a), thrust::get<1>(a), thrust::get<2>(a), 1.0);
V=_Mat4f*V;
Float3 res=thrust::make_tuple( V(0,0), V(1,0), V(2,0) );
return res;
}
};
int main(void)
{
// Coordinates stored as three separate device vectors (structure-of-arrays).
thrust::device_vector<float> X(3);
thrust::device_vector<float> Y(3);
thrust::device_vector<float> Z(3);
// Element-wise initialization; each assignment is a host-to-device transfer.
X[0]=0, X[1]=1, X[2]=2;
Y[0]=4, Y[1]=5, Y[2]=6;
Z[0]=7, Z[1]=8, Z[2]=9;
std::cout << "X,Y,Z before transformation="<< std::endl;
thrust::copy_n(X.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Y.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Z.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
// Keep host-side copies of the untransformed data so the device result can
// be verified against the same arithmetic done on the host further below.
thrust::host_vector<float> hX = X;
thrust::host_vector<float> hY = Y;
thrust::host_vector<float> hZ = Z;
// Zip the three vectors so each dereference yields one (x,y,z) tuple.
Float3Iterator P_first = thrust::make_zip_iterator(make_tuple(X.begin(), Y.begin(), Z.begin()));
Float3Iterator P_last = thrust::make_zip_iterator(make_tuple(X.end(), Y.end(), Z.end()));
// 4x4 transform matrix, filled entry by entry as M(row, col).
Eigen::Matrix4f M;
M(0,0)= 1; M(0,1)= 2; M(0,2)= 3; M(0,3)= 4;
M(1,0)= 5; M(1,1)= 6; M(1,2)= 7; M(1,3)= 8;
M(2,0)= 9; M(2,1)= 10; M(2,2)= 11; M(2,3)= 12;
M(3,0)= 13; M(3,1)= 14; M(3,2)= 15; M(3,3)= 16;
// Apply M to every point in place on the device.
thrust::transform(thrust::device, P_first,P_last, P_first, modify_tuple(M));
std::cout << "X, Y, Z after transformation="<< std::endl;
thrust::copy_n(X.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Y.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
thrust::copy_n(Z.begin(), 3, std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
// Host-side verification: redo M * (x, y, z, 1) for each of the three
// original points with Eigen on the CPU and print the full 4-vector.
Eigen::Vector4f hV;
hV(0) = hX[0];
hV(1) = hY[0];
hV(2) = hZ[0];
hV(3) = 1;
hV = M*hV;
std::cout << "column 0:" << std::endl;
std::cout << hV;
std::cout << std::endl;
hV(0) = hX[1];
hV(1) = hY[1];
hV(2) = hZ[1];
hV(3) = 1;
hV = M*hV;
std::cout << "column 1:" << std::endl;
std::cout << hV;
std::cout << std::endl;
hV(0) = hX[2];
hV(1) = hY[2];
hV(2) = hZ[2];
hV(3) = 1;
hV = M*hV;
std::cout << "column 2:" << std::endl;
std::cout << hV;
std::cout << std::endl;
return 0;
}
$ nvcc -std=c++11 -I/home/bob/eigen/eigen-eigen-71546f1a9f0c t21.cu -o t21 --expt-relaxed-constexpr
$ ./t21
X,Y,Z before transformation=
0,1,2,
4,5,6,
7,8,9,
X, Y, Z after transformation=
33,39,45,
81,99,117,
129,159,189,
column 0:
33
81
129
177
column 1:
39
99
159
219
column 2:
45
117
189
261
$
Related
How to cope with "cudaErrorMissingConfiguration" from "cudaMallocPitch" function of CUDA?
I'm making a Mandelbrot set program with CUDA. However I can't step more unless cudaErrorMissingConfiguration from cudaMallocPitch() function of CUDA is to be solved. Could you tell me something about it? My GPU is GeForce RTX 2060 SUPER. I'll show you my command lines below. > nvcc MandelbrotCUDA.cu -o MandelbrotCUDA -O3 I tried cudaDeviceSetLimit( cudaLimitMallocHeapSize, 7*1024*1024*1024 ) to resize heap size. cudaDeviceSetLimit was success. However I cannot step one more. I cannot print "CUDA malloc done!" #include <iostream> #include <thrust/complex.h> #include <fstream> #include <string> #include <stdlib.h> using namespace std; #define D 0.0000025 // Tick #define LIMIT_N 255 #define INF_NUM 2 #define PLOT_METHOD 2 // dat file : 0, ppm file : 1, ppm file with C : 2 __global__ void calculation(const int indexTotalX, const int indexTotalY, int ***n, thrust::complex<double> ***c){ // n, c are the pointers of dN, dC. for(int i = 0; i < indexTotalY ; i++){ for(int j = 0; j < indexTotalX; j++){ thrust::complex<double> z(0.0f, 0.0f); n[i][j] = 0; for(int ctr=1; ctr <= LIMIT_N ; ctr++){ z = z*z + (*(c[i][j])); n[i][j] = n[i][j] + (abs(z) < INF_NUM); } } } } int main(){ // Data Path string filePath = "Y:\\Documents\\Programming\\mandelbrot\\"; string fileName = "mandelbrot4.ppm"; string filename = filePath+fileName; //complex<double> c[N][M]; double xRange[2] = {-0.76, -0.74}; double yRange[2] = {0.05, 0.1}; const int indexTotalX = (xRange[1]-xRange[0])/D; const int indexTotalY = (yRange[1]-yRange[0])/D; thrust::complex<double> **c; //c = new complex<double> [N]; cout << "debug_n" << endl; int **n; n = new int* [indexTotalY]; c = new thrust::complex<double> * [indexTotalY]; for(int i=0;i<indexTotalY;i++){ n[i] = new int [indexTotalX]; c[i] = new thrust::complex<double> [indexTotalX]; } cout << "debug_n_end" << endl; for(int i = 0; i < indexTotalY; i++){ for(int j = 0; j < indexTotalX; j++){ thrust::complex<double> tmp( xRange[0]+j*D, yRange[0]+i*D ); c[i][j] = tmp; 
//n[i*sqrt(N)+j] = 0; } } // CUDA malloc cout << "CUDA malloc initializing..." << endl; int **dN; thrust::complex<double> **dC; cudaError_t error; error = cudaDeviceSetLimit(cudaLimitMallocHeapSize, 7*1024*1024*1024); if(error != cudaSuccess){ cout << "cudaDeviceSetLimit's ERROR CODE = " << error << endl; return 0; } size_t tmpPitch; error = cudaMallocPitch((void **)dN, &tmpPitch,(size_t)(indexTotalY*sizeof(int)), (size_t)(indexTotalX*sizeof(int))); if(error != cudaSuccess){ cout << "CUDA ERROR CODE = " << error << endl; cout << "indexTotalX = " << indexTotalX << endl; cout << "indexTotalY = " << indexTotalY << endl; return 0; } cout << "CUDA malloc done!" << endl; This is console messages below. debug_n debug_n_end CUDA malloc initializing... CUDA ERROR CODE = 1 indexTotalX = 8000 indexTotalY = 20000
There are several problems here: int **dN; ... error = cudaMallocPitch((void **)dN, &tmpPitch,(size_t)(indexTotalY*sizeof(int)), (size_t)(indexTotalX*sizeof(int))); The correct type of pointer to use in CUDA allocations is a single pointer: int *dN; not a double pointer: int **dN; (so your kernel where you are trying pass triple-pointers: void calculation(const int indexTotalX, const int indexTotalY, int ***n, thrust::complex<double> ***c){ // n, c are the pointers of dN, dC. is almost certainly not going to work, and should not be designed that way, but that is not the question you are asking.) The pointer is passed to the allocating function by its address: error = cudaMallocPitch((void **)&dN, For cudaMallocPitch, only the horizontal requested dimension is scaled by the size of the data element. The allocation height is not scaled this way. Also, I will assume X corresponds to your allocation width, and Y corresponds to your allocation height, so you also have those parameters reversed: error = cudaMallocPitch((void **)&dN, &tmpPitch,(size_t)(indexTotalX*sizeof(int)), (size_t)(indexTotalY)); The cudaLimitMallocHeapSize should not be necessary to set to make any of this work. It applies only to in-kernel allocations. Reserving 7GB on an 8GB card may also cause problems. Until you are sure you need that (it's not needed for what you have shown) I would simply remove that. $ cat t1488.cu #include <iostream> #include <thrust/complex.h> #include <fstream> #include <string> #include <stdlib.h> using namespace std; #define D 0.0000025 // Tick #define LIMIT_N 255 #define INF_NUM 2 #define PLOT_METHOD 2 // dat file : 0, ppm file : 1, ppm file with C : 2 __global__ void calculation(const int indexTotalX, const int indexTotalY, int ***n, thrust::complex<double> ***c){ // n, c are the pointers of dN, dC. 
for(int i = 0; i < indexTotalY ; i++){ for(int j = 0; j < indexTotalX; j++){ thrust::complex<double> z(0.0f, 0.0f); n[i][j] = 0; for(int ctr=1; ctr <= LIMIT_N ; ctr++){ z = z*z + (*(c[i][j])); n[i][j] = n[i][j] + (abs(z) < INF_NUM); } } } } int main(){ // Data Path string filePath = "Y:\\Documents\\Programming\\mandelbrot\\"; string fileName = "mandelbrot4.ppm"; string filename = filePath+fileName; //complex<double> c[N][M]; double xRange[2] = {-0.76, -0.74}; double yRange[2] = {0.05, 0.1}; const int indexTotalX = (xRange[1]-xRange[0])/D; const int indexTotalY = (yRange[1]-yRange[0])/D; thrust::complex<double> **c; //c = new complex<double> [N]; cout << "debug_n" << endl; int **n; n = new int* [indexTotalY]; c = new thrust::complex<double> * [indexTotalY]; for(int i=0;i<indexTotalY;i++){ n[i] = new int [indexTotalX]; c[i] = new thrust::complex<double> [indexTotalX]; } cout << "debug_n_end" << endl; for(int i = 0; i < indexTotalY; i++){ for(int j = 0; j < indexTotalX; j++){ thrust::complex<double> tmp( xRange[0]+j*D, yRange[0]+i*D ); c[i][j] = tmp; //n[i*sqrt(N)+j] = 0; } } // CUDA malloc cout << "CUDA malloc initializing..." << endl; int *dN; thrust::complex<double> **dC; cudaError_t error; size_t tmpPitch; error = cudaMallocPitch((void **)&dN, &tmpPitch,(size_t)(indexTotalX*sizeof(int)), (size_t)(indexTotalY)); if(error != cudaSuccess){ cout << "CUDA ERROR CODE = " << error << endl; cout << "indexTotalX = " << indexTotalX << endl; cout << "indexTotalY = " << indexTotalY << endl; return 0; } cout << "CUDA malloc done!" << endl; } $ nvcc -o t1488 t1488.cu t1488.cu(68): warning: variable "dC" was declared but never referenced $ cuda-memcheck ./t1488 ========= CUDA-MEMCHECK debug_n debug_n_end CUDA malloc initializing... CUDA malloc done! ========= ERROR SUMMARY: 0 errors $
namespace::function cannot be used as a function
main.cpp #include "Primes.h" #include <iostream> int main(){ std::string choose; int num1, num2; while(1 == 1){ std::cout << "INSTRUCTIONS" << std::endl << "Enter:" << std::endl << "'c' to check whether a number is a prime," << std::endl << "'u' to view all the prime numbers between two numbers " << "that you want," << std::endl << "'x' to exit," << std::endl << "Enter what you would like to do: "; std::cin >> choose; std::cout << std::endl; if(choose == "c"){ std::cout << "Enter number: "; std::cin >> num1; Primes::checkPrimeness(num1) == 1 ? std::cout << num1 << " is a prime." << std::endl << std::endl : std::cout << num1 << " isn't a prime." << std::endl << std::endl; }else if(choose == "u"){ std::cout << "Enter the number you want to start seeing primes " << "from: "; std::cin >> num1; std::cout << "\nEnter the number you want to stop seeing primes " << "till: "; std::cin >> num2; std::cout << std::endl; for(num1; num1 <= num2; num1++){ Primes::checkPrimeness(num1) == 1 ? std::cout << num1 << " is a prime." << std::endl : std::cout << num1 << " isn't a prime." << std::endl; } }else if(choose == "x"){ return 0; } std::cout << std::endl; } } Primes.h #ifndef PRIMES_H #define PRIMES_H namespace Primes{ extern int num, count; extern bool testPrime; // Returns true if the number is a prime and false if it isn't. int checkPrimeness(num); } #endif Primes.cpp #include "Primes.h" #include <iostream> int Primes::checkPrimeness(num){ if(num < 2){ return(0); }else if(num == 2){ return(1); }else{ for(count = 0; count < num; count++){ for(count = 2; count < num; count++){ if(num % count == 0){ return(0); }else{ testPrime = true; if(count == --num && testPrime == true){ return(1); } } } } } } I get the following 3 errors: Errors from terminal I've spent hours for days and still can't seem to fix the errors. I've tried using extern and pretty much everything I can imagine.
Here is an error in function declaration: int checkPrimeness(num); defines a global integer variable checkPrimeness initialized with num! To declare a function you just should change it like: int checkPrimeness(int); Can't understand why you declare parameters as external variables. To split declarations and realization you should declare all functions and classes inside header file, and define them inside source file.
Error: 2.5e-1 cannot be used as a function
i wrote a simple program and i'm getting this error which i never encountered yet. Can you help me out? line 13: error: 2.5e-1 cannot be used as a function #include <iostream> #include <iomanip> using namespace std; int dirac(int); int main() { float y; for(int k = 0; k <= 4; k++){ y = 2*dirac(k)-0.5*dirac(k-1)*0.25(2*dirac(k-2)-0.5*dirac(k-3)); cout << "k = " << k << ": "; cout << setw(8) << setfill(' '); cout << setprecision(3) << fixed << y << endl; } return 0; } int dirac(int x){ if(x == 0){ x = 1; return x; }else{ x = 0; return x; } }
y = 2*dirac(k)-0.5*dirac(k-1)*0.25(2*dirac(k-2)-0.5*dirac(k-3)); ^--- You probably forgot a * at the indicated spot.
How to use CUB and Thrust in one CUDA code
I'm trying to introduce some CUB into my "old" Thrust code, and so have started with a small example to compare thrust::reduce_by_key with cub::DeviceReduce::ReduceByKey, both applied to thrust::device_vectors. The thrust part of the code is fine, but the CUB part, which naively uses raw pointers obtained via thrust::raw_pointer_cast, crashes after the CUB calls. I put in a cudaDeviceSynchronize() to try to solve this problem, but it didn't help. The CUB part of the code was cribbed from the CUB web pages. On OSX the runtime error is: libc++abi.dylib: terminate called throwing an exception Abort trap: 6 On Linux the runtime error is: terminate called after throwing an instance of 'thrust::system::system_error' what(): an illegal memory access was encountered The first few lines of cuda-memcheck are: ========= CUDA-MEMCHECK ========= Invalid __global__ write of size 4 ========= at 0x00127010 in /home/sdettrick/codes/MCthrust/tests/../cub-1.3.2/cub/device/dispatch/../../block_range/block_range_reduce_by_key.cuh:1017:void cub::ReduceByKeyRegionKernel<cub::DeviceReduceByKeyDispatch<unsigned int*, unsigned int*, float*, float*, int*, cub::Equality, CustomSum, int>::PtxReduceByKeyPolicy, unsigned int*, unsigned int*, float*, float*, int*, cub::ReduceByKeyScanTileState<float, int, bool=1>, cub::Equality, CustomSum, int>(unsigned int*, float*, float*, int*, cub::Equality, CustomSum, int, cub::DeviceReduceByKeyDispatch<unsigned int*, unsigned int*, float*, float*, int*, cub::Equality, CustomSum, int>::PtxReduceByKeyPolicy, unsigned int*, int, cub::GridQueue<int>) ========= by thread (0,0,0) in block (0,0,0) ========= Address 0x7fff7dbb3e88 is out of bounds ========= Saved host backtrace up to driver entry point at kernel launch time Unfortunately I'm not too sure what to do about that. Any help would be greatly appreciated. I tried this on the NVIDIA developer zone but didn't get any responses. The complete example code is below. 
It should compile with CUDA 6.5 and cub 1.3.2: #include <iostream> #include <thrust/sort.h> #include <thrust/gather.h> #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> //======================================== // for CUB: struct CustomSum { template <typename T> CUB_RUNTIME_FUNCTION __host__ __device__ __forceinline__ //__host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return b+a; } }; //======================================== int main() { const int Nkey=20; int Nseg=9; int ikey[Nkey] = {0, 0, 0, 6, 8, 0, 2, 4, 6, 8, 1, 3, 5, 7, 8, 1, 3, 5, 7, 8}; thrust::device_vector<unsigned int> key(ikey,ikey+Nkey); thrust::device_vector<unsigned int> keysout(Nkey); // Let's reduce x, by key: float xval[Nkey]; for (int i=0; i<Nkey; i++) xval[i]=ikey[i]+0.1f; thrust::device_vector<float> x(xval,xval+Nkey); // First, sort x by key: thrust::sort_by_key(key.begin(),key.end(),x.begin()); //--------------------------------------------------------------------- std::cout<<"=================================================================="<<std::endl <<" THRUST reduce_by_key:"<<std::endl <<"=================================================================="<<std::endl; thrust::device_vector<float> output(Nseg,0.0f); thrust::reduce_by_key(key.begin(), key.end(), x.begin(), keysout.begin(), output.begin()); for (int i=0;i<Nkey;i++) std::cout << x[i] <<" "; std::cout<<std::endl; for (int i=0;i<Nkey;i++) std::cout << key[i] <<" "; std::cout<<std::endl; for (int i=0;i<Nseg;i++) std::cout << output[i] <<" "; std::cout<<std::endl; float ototal=thrust::reduce(output.begin(),output.end()); float xtotal=thrust::reduce(x.begin(),x.end()); std::cout << "total="<< ototal <<", should be "<<xtotal<<std::endl; 
//--------------------------------------------------------------------- std::cout<<"=================================================================="<<std::endl <<" CUB ReduceByKey:"<<std::endl <<"=================================================================="<<std::endl; unsigned int *d_keys_in =thrust::raw_pointer_cast(&key[0]); float *d_values_in =thrust::raw_pointer_cast(&x[0]); unsigned int *d_keys_out =thrust::raw_pointer_cast(&keysout[0]); float *d_values_out=thrust::raw_pointer_cast(&output[0]); int *d_num_segments=&Nseg; CustomSum reduction_op; std::cout << "CUB input" << std::endl; for (int i=0; i<Nkey; ++i) std::cout << key[i] << " "; std::cout<<std::endl; for (int i=0; i<Nkey; ++i) std::cout << x[i] << " "; std::cout<< std::endl; for (int i=0; i<Nkey; ++i) std::cout << keysout[i] << " "; std::cout<< std::endl; for (int i=0; i<Nseg; ++i) std::cout << output[i] << " "; std::cout<< std::endl; // Determine temporary device storage requirements void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_segments, reduction_op, Nkey); // Allocate temporary storage cudaMalloc(&d_temp_storage, temp_storage_bytes); std::cout << "temp_storage_bytes = " << temp_storage_bytes << std::endl; // Run reduce-by-key cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, d_values_in, d_values_out, d_num_segments, reduction_op, Nkey); cudaDeviceSynchronize(); std::cout << "CUB output" << std::endl; std::cout<<Nkey<<" "<<Nseg<<std::endl; std::cout<<key.size() << " "<<x.size() << " "<<keysout.size() << " "<<output.size() << std::endl; // At this point onward it dies: //libc++abi.dylib: terminate called throwing an exception //Abort trap: 6 // If the next line is uncommented, it crashes the Mac! 
for (int i=0; i<Nkey; ++i) std::cout << key[i] << " "; std::cout<<std::endl; // for (int i=0; i<Nkey; ++i) std::cout << x[i] << " "; std::cout<< std::endl; // for (int i=0; i<Nkey; ++i) std::cout << keysout[i] << " "; std::cout<< std::endl; // for (int i=0; i<Nseg; ++i) std::cout << output[i] << " "; std::cout<< std::endl; cudaFree(d_temp_storage); ototal=thrust::reduce(output.begin(),output.end()); xtotal=thrust::reduce(x.begin(),x.end()); std::cout << "total="<< ototal <<", should be "<<xtotal<<std::endl; return 1; }
This is not appropriate: int *d_num_segments=&Nseg; You cannot take the address of a host variable and use it as a device pointer. Instead do this: int *d_num_segments; cudaMalloc(&d_num_segments, sizeof(int)); This allocates space on the device for the size of data (a single integer that cub will write to), and assigns the address of that allocation to your d_num_segments variable. This then becomes a valid device pointer. In (*ordinary, non-UM) CUDA, it is illegal dereference a host address in device code, or a device address in host code.
thrust::device_vector not working
I have written a code using Thrust. I am pasting the code and its output below. Strangely, when the device_vector line is reached during exectution the screen just hangs and no more output comes. It was working in the morning. Please help me. #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> int main(void) { // H has storage for 4 integers thrust::host_vector<int> H(4); // initialize individual elements H[0] = 14; H[1] = 20; H[2] = 38; H[3] = 46; // H.size() returns the size of vector H std::cout << "H has size " << H.size() << std::endl; // print contents of H for(size_t i = 0; i < H.size(); i++) std::cout << "H[" << i << "] = " << H[i] << std::endl; // resize H H.resize(2); std::cout << "H now has size " << H.size() << std::endl; // Copy host_vector H to device_vector D thrust::device_vector<int> D = H; // elements of D can be modified D[0] = 99; D[1] = 88; // print contents of D for(size_t i = 0; i < D.size(); i++) std::cout << "D[" << i << "] = " << D[i] << std::endl; // H and D are automatically deleted when the function returns return 0; } The output is : H has size 4 H[0] = 14 H[1] = 20 H[2] = 38 H[3] = 46 H now has size 2 * After this nothing happens
Run Device Query. I am confident that if the code was working in the morning, the problem is due to the graphics card.