Large data file access from a device function - cuda

A large data file (1000000 rows and 2 columns) needs to be accessed from a device function. At every step of the computation the value of a variable changes in the device function, and the values of one particular row of the data file are required. So the whole data file should be available to the device function, because there is no control over the value of the variable at any given step.
In the following program, the value of the variable is sent from a device function to a __host__ __device__ function, and from there the variable is sent on to a host function (where the data file is available). The host function picks the required values out of the entire array, and in this way the values needed for one particular step should reach the device function. But this approach is not working in the code below.
I don't know whether the __host__ __device__ keyword combination works in CUDA V0.2.1221 or not.
Please suggest a way to access the large data file from a device function.
The relevant portion of the code is given below.
__host__
void magnetic(R *xx, R *magfield)
{
    float Bx[1000001], By[1000001];
    R x[3];
    int k;
    FILE *fp;
    fp = fopen("field.dat", "r");
    for (int i = 0; i <= 1000000; i++)
    {
        fscanf(fp, "%f%f", &Bx[i], &By[i]);
    }
    fclose(fp);
    printf("%f\t%f\n", Bx[0], By[0]);
    for (int i = 0; i < 3; i++) {
        x[i] = *xx;
        xx++;
    }
    //printf("%f\t%f\t%f\n",x[0],x[1],x[2]);
    k = round((zi + x[2]) / dz);
    magfield[0] = Bx[k];
    magfield[1] = By[k];
    magfield[2] = 2.;
}
__host__
void magnetic(R *xx, R *magfield);
__host__ __device__
void field(R *xx, R *magfield) {
    R x[3];
    for (int i = 0; i < 3; i++) {
        x[i] = *xx;
        xx++;
    }
    magnetic(x, magfield);
}
__host__ __device__
void field(R *xx, R *magfield);
__device__
void eval_rhs(R *f, R *df, R time, int istep) {
    R magfield[3], vXB[3];
    df[0] = f[3];
    df[1] = f[4];
    df[2] = f[5];
    field(&f[0], magfield);
    crossmultiply(&f[3], magfield, vXB);
    df[3] = Ex + vXB[0];
    df[4] = Ey + vXB[1];
    df[5] = Ez + vXB[2];
}

Question:
Please suggest a way to access the large data file from a device function.
Answer:
Load the file on the host side, copy the buffer from system memory to device memory, and then use this device-side buffer in your kernels and/or device functions.
int main()
{
    FILE* file;
    float* fileData;
    ... // Allocate buffer here
    ... // Open and load file here

    float* dev_fileData;
    cudaMalloc(/*allocate the same-sized array on device side*/);
    cudaMemcpy(/*copy from host-side fileData to device-side*/);

    kernel<<<blocks, threads>>>(dev_fileData);
    system("pause");
    return 0;
}
__device__ void doSomething(float* dev_fileData)
{
    // Read (and/or write, but avoid concurrent access) data here
}
__global__ void kernel(float* dev_fileData)
{
    // Some kernel code
    doSomething(dev_fileData);
    // Some other kernel code
}
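Expanding that outline into a fuller sketch tailored to the question above (the size 1000001, the file name field.dat, and the zi/dz lookup come from the original post; zi and dz are passed as plain floats here so the example is self-contained, and the kernel body is only a placeholder):
__device__ void magnetic_dev(const float *Bx, const float *By,
                             float z, float zi, float dz, float *magfield)
{
    int k = (int)roundf((zi + z) / dz);   // same lookup as the host version
    magfield[0] = Bx[k];
    magfield[1] = By[k];
    magfield[2] = 2.f;
}

__global__ void kernel(const float *Bx, const float *By, float zi, float dz)
{
    float magfield[3];
    magnetic_dev(Bx, By, 0.f, zi, dz, magfield);
    // ... use magfield ...
}

int main()
{
    static float Bx[1000001], By[1000001];   // static: too large for the stack
    FILE *fp = fopen("field.dat", "r");
    for (int i = 0; i <= 1000000; i++)
        fscanf(fp, "%f%f", &Bx[i], &By[i]);
    fclose(fp);

    float *dev_Bx, *dev_By;
    cudaMalloc(&dev_Bx, 1000001 * sizeof(float));
    cudaMalloc(&dev_By, 1000001 * sizeof(float));
    cudaMemcpy(dev_Bx, Bx, 1000001 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_By, By, 1000001 * sizeof(float), cudaMemcpyHostToDevice);

    kernel<<<1, 1>>>(dev_Bx, dev_By, 0.f, 1.f);   // placeholder zi and dz values
    cudaDeviceSynchronize();

    cudaFree(dev_Bx);
    cudaFree(dev_By);
    return 0;
}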

Related

Can I run a CUDA device function without parallelization or calling it as part of a kernel?

I have a program that loads an image onto a CUDA device, analyzes it with cufft and some custom stuff, and updates a single number on the device which the host then queries as needed. The analysis is mostly parallelized, but the last step sums everything up (using thrust::reduce) for a couple final calculations that aren't parallel.
Once everything is reduced, there's nothing to parallelize, but I can't figure out how to just run a device function without calling it as its own tiny kernel with <<<1, 1>>>. That seems like a hack. Is there a better way to do this? Maybe a way to tell the parallelized kernel "just do these last lines once after the parallel part is finished"?
I feel like this must have been asked before, but I can't find it. Might just not know what to search for though.
Code snippet below; I hope I didn't remove anything relevant:
float *d_phs_deltas; // Allocated using cudaMalloc (data is on device)
__device__ float d_Z;

static __global__ void getDists(const cufftComplex* data, const bool* valid, float* phs_deltas)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;

    // Do stuff with the line indicated by index i
    // ...

    // Save result into array, gets reduced to single number in setDist
    phs_deltas[i] = phs_delta;
}

static __global__ void setDist(const cufftComplex* data, const bool* valid, const float* phs_deltas)
{
    // Final step; does it need to be its own kernel if it only runs once??
    d_Z += phs2dst * thrust::reduce(thrust::device, phs_deltas, phs_deltas + d_y);

    // Save some other stuff to refer to next frame
    // ...
}

void fftExec(unsigned __int32 *host_data)
{
    // Copy image to device, do FFT, etc
    // ...

    // Last parallel analysis step, sets d_phs_deltas
    getDists<<<out_blocks, N_THREADS>>>(d_result, d_valid, d_phs_deltas);

    // Should this be a serial part at the end of getDists somehow?
    setDist<<<1, 1>>>(d_result, d_valid, d_phs_deltas);
}

// d_Z is copied out only on request
void getZ(float *Z) { cudaMemcpyFromSymbol(Z, d_Z, sizeof(float)); }
Thank you!
There is no way to run a device function directly without launching a kernel. As pointed out in comments, there is a working example in the Programming Guide which shows how to use memory fence functions and an atomically incremented counter to signal that a given block is the last block:
__device__ unsigned int count = 0;
__global__ void sum(const float* array, unsigned int N, volatile float* result)
{
    __shared__ bool isLastBlockDone;
    float partialSum = calculatePartialSum(array, N);

    if (threadIdx.x == 0) {
        result[blockIdx.x] = partialSum;

        // Thread 0 makes sure that the incrementation
        // of the "count" variable is only performed after
        // the partial sum has been written to global memory.
        __threadfence();

        // Thread 0 signals that it is done.
        unsigned int value = atomicInc(&count, gridDim.x);

        // Thread 0 determines if its block is the last
        // block to be done.
        isLastBlockDone = (value == (gridDim.x - 1));
    }

    // Synchronize to make sure that each thread reads
    // the correct value of isLastBlockDone.
    __syncthreads();

    if (isLastBlockDone) {
        // The last block sums the partial sums
        // stored in result[0 .. gridDim.x-1]
        float totalSum = calculateTotalSum(result);

        if (threadIdx.x == 0) {
            // Thread 0 of last block stores the total sum
            // to global memory and resets the count
            // variable, so that the next kernel call
            // works properly.
            result[0] = totalSum;
            count = 0;
        }
    }
}
I would recommend benchmarking both ways and choosing whichever is faster. On most platforms kernel launch latency is only a few microseconds, so a short-running kernel that finishes an action after a long-running kernel can be the most efficient way to get this done.

A function calls another function in CUDA C++

I have a problem with CUDA programming!
Input is a matrix A (2 x 2).
Output is a matrix A (2 x 2) where every new value is the old value raised to the power of 3.
Example:
    input:  A = { 2, 2 }        output:  A = { 8, 8 }
                { 2, 2 }                     { 8, 8 }
I have 2 functions in the file CudaCode.CU:
__global__ void Power_of_02(int &a)
{
    a = a * a;
}
//***************
__global__ void Power_of_03(int &a)
{
    int tempt = a;
    Power_of_02(a); // a = a^2
    a = a * tempt;  // a = a^3
}
and the kernel:
__global__ void CudaProcessingKernel(int *dataA) // kernel function
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int tid = bx * XTHREADS + tx;

    if (tid < 16)
    {
        Power_of_03(dataA[tid]);
    }
    __syncthreads();
}
I think it's right, but this error appears: calling a __global__ function("Power_of_02") from a __global__ function("Power_of_03") is only allowed on the compute_35 architecture or above
Where am I wrong? How can I fix it?
The error is fairly explanatory. A CUDA function decorated with __global__ represents a kernel. Kernels can be launched from host code. On cc 3.5 or higher GPUs, you can also launch a kernel from device code. So if you call a __global__ function from device code (i.e. from another CUDA function that is decorated with __global__ or __device__), then you must be compiling for the appropriate architecture. This is called CUDA dynamic parallelism, and you should read the documentation to learn how to use it, if you want to use it.
When you launch a kernel, whether from host or device code, you must provide a launch configuration, i.e. the information between the triple-chevron notation:
CudaProcessingKernel<<<grid, threads>>>(d_A);
If you want to use your power-of-2 code from another kernel, you will need to call it in a similar, appropriate fashion.
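Purely as a hedged illustration of that dynamic parallelism route (the recommended __device__ rewrite follows below): the kernel names here are hypothetical, the argument is a pointer into device memory, and the sketch assumes an sm_35-era toolkit built with relocatable device code, e.g. nvcc -arch=sm_35 -rdc=true file.cu -lcudadevrt.
__global__ void Power_of_02_child(int *a)
{
    *a = (*a) * (*a);
}

__global__ void Power_of_03_parent(int *a)
{
    int tempt = *a;
    Power_of_02_child<<<1, 1>>>(a);   // child kernel launch from device code
    cudaDeviceSynchronize();          // device-side sync (legacy CDP) so the
                                      // child's result is visible to the parent
    *a = (*a) * tempt;                // a = a^3
}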
Based on the structure of your code, however, it seems like you can make things work by declaring your power-of-2 and power-of-3 functions as __device__ functions:
__device__ void Power_of_02(int &a)
{
    a = a * a;
}
//***************
__device__ void Power_of_03(int &a)
{
    int tempt = a;
    Power_of_02(a); // a = a^2
    a = a * tempt;  // a = a^3
}
This should probably work for you and perhaps was your intent. Functions decorated with __device__ are not kernels (so they are not callable directly from host code) but are callable directly from device code on any architecture. The programming guide will also help to explain the difference.
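As a usage illustration (not part of the original answer), a minimal host-side driver for the __device__ version above; XTHREADS is assumed to be the block width used in the original kernel, taken here as 16:
#define XTHREADS 16

int main()
{
    int h_A[16];
    for (int i = 0; i < 16; i++) h_A[i] = 2;

    int *d_A;
    cudaMalloc(&d_A, 16 * sizeof(int));
    cudaMemcpy(d_A, h_A, 16 * sizeof(int), cudaMemcpyHostToDevice);

    CudaProcessingKernel<<<1, XTHREADS>>>(d_A);   // 16 threads in one block

    cudaMemcpy(h_A, d_A, 16 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    return 0;   // h_A now holds 8s (2 cubed) if everything succeeded
}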

Accessing cusp variable element from device kernel

I have a problem accessing and assigning a variable of the cusp array1d type from a device/global kernel. The attached code gives the errors
alay.cu(8): warning: address of a host variable "p1" cannot be directly taken in a device function
alay.cu(8): error: calling a __host__ function("thrust::detail::vector_base<float, thrust::device_malloc_allocator<float> > ::operator []") from a __global__ function("func") is not allowed
Code below:
#include <cusp/blas.h>

cusp::array1d<float, cusp::device_memory> p1(10, 3);

__global__ void func()
{
    p1[blockIdx.x] = p1[blockIdx.x] + blockIdx.x * 5;
}

int main()
{
    func<<<10, 1>>>();
    return 0;
}
CUSP matrices and arrays (and the Thrust containers they are built with) are intended for host use only. You cannot directly use them in GPU code.
The canonical way to populate a CUSP sparse matrix is to construct it in host memory and then copy it across to device memory using the copy constructor, so your trivial example becomes this:
cusp::array1d<float, cusp::host_memory> p1(10);
for (int i = 0; i < 10; i++) p1[i] = 4.f;
cusp::array1d<float, cusp::device_memory> p2 = p1; // data now on device
If you want to manipulate a sparse matrix in device code, you will need to have a kernel specifically for whichever format you are interested in, and pass pointers to each of the device arrays holding the matrix data as arguments to that kernel. There is good Doxygen source annotation for all of the sparse types included in the CUSP distribution.
Your edit still doesn't present anything which couldn't be done on the host without a kernel, viz:
cusp::array1d<float, cusp::host_memory> p1(10, 3.f);
for (int i = 0; i < 10; i++) p1[i] += (i * 5.f);
cusp::array1d<float, cusp::device_memory> p2 = p1; // data now on device
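If you really do need to touch the data from a kernel, here is a minimal sketch of the "pass raw device pointers" approach mentioned above (the kernel name and sizes are illustrative): the underlying device pointer of a CUSP/Thrust array can be obtained with thrust::raw_pointer_cast and handed to an ordinary kernel.
#include <cusp/array1d.h>
#include <thrust/device_ptr.h>

__global__ void func(float *p, int n)
{
    int i = blockIdx.x;
    if (i < n)
        p[i] += i * 5.f;
}

int main()
{
    cusp::array1d<float, cusp::device_memory> p1(10, 3.f);
    float *raw = thrust::raw_pointer_cast(&p1[0]);   // raw device pointer to the array's storage
    func<<<10, 1>>>(raw, 10);
    cudaDeviceSynchronize();
    return 0;
}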

Copy array of structures from host to device in CUDA

I am trying to copy an array of structures from host to device in CUDA. For example:
#define N 1000
#define M 100000

typedef struct {
    int i;
    float L[N];
} t;

__global__ void kernel() {
    // do something
}

int main() {
    t *B, *B_d;                        // pointers to host & device arrays of structures
    int size = M * sizeof(t);
    B = (t*)calloc(M, sizeof(t));
    cudaMalloc((void **)&B_d, size);   // allocate array of structures on the device
    // reading B from file ...
    cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
    kernel<<<1, 1>>>();
}
Is that right or not? And how can I use the kernel function?
Now you can declare your kernel as accepting a parameter of type t* and pass your device pointer B_d in the kernel call (see the sketch after the comments below).
Some comments:
1. Using only 1 thread in the kernel call is very inefficient. For optimal results you need to use a multiple of 32 threads per block.
2. An array of structures will not let your code use memory bandwidth effectively. For optimal results you need to make coalesced reads.
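A minimal sketch of how that could look (the kernel body is hypothetical; t, M and B_d are the names from the code above): the kernel takes a t* parameter, the device pointer B_d is passed at launch, and the launch uses many threads per block instead of a single thread.
__global__ void kernel(t *B_d)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < M)
        B_d[idx].i = idx;   // placeholder work on one structure per thread
}

// In main(), after the cudaMemcpy:
//     kernel<<<(M + 255) / 256, 256>>>(B_d);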

Using std::vector in CUDA device code

The question is: is there a way to use the class "vector" in CUDA kernels? When I try, I get the following error:
error : calling a host function("std::vector<int, std::allocator<int> > ::push_back") from a __device__/__global__ function not allowed
So is there a way to use a vector in the __global__ section?
I recently tried the following:
create a new Cuda project
go to properties of the project
open Cuda C/C++
go to Device
change the value in "Code Generation" to be set to this value:
compute_20,sm_20
After that I was able to use the printf standard library function in my CUDA kernel.
Is there a way to use the standard library class vector in the same way printf is supported in kernel code? This is an example of using printf in kernel code:
// this code is only to count the 3s in an array using CUDA
// private_count is an array to hold every thread's result separately
__global__ void countKernel(int *a, int length, int* private_count)
{
    printf("%d\n", threadIdx.x); // prints the thread id; this works

    // vector<int> y;
    // y.push_back(0); is there a possibility to do this?

    unsigned int offset = threadIdx.x * length;
    int i = offset;
    for (; i < offset + length; i++)
    {
        if (a[i] == 3)
        {
            private_count[threadIdx.x]++;
            printf("%d ", a[i]);
        }
    }
}
You can't use the STL in CUDA, but you may be able to use the Thrust library to do what you want. Otherwise just copy the contents of the vector to the device and operate on it normally.
In the CUDA library Thrust, you can use thrust::device_vector<classT> to define a vector on the device, and the data transfer between a host STL vector and a device vector is very straightforward. You can refer to this useful link: http://docs.nvidia.com/cuda/thrust/index.html to find some useful examples.
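A short sketch of that approach (nothing here beyond the standard Thrust API): transfers between thrust::host_vector and thrust::device_vector are plain assignments, and individual device elements can be read or written from host code.
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>

int main()
{
    thrust::host_vector<int> h(16, 3);   // 16 elements, all set to 3
    thrust::device_vector<int> d = h;    // copies host -> device
    d[0] = 7;                            // single-element access from host code
    h = d;                               // copies device -> host
    return 0;
}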
You can't use std::vector in device code; you should use a plain array instead.
I think you can implement a device vector yourself, because CUDA supports dynamic memory allocation in device code; operators new/delete are also supported. Here is an extremely simple prototype of a device vector in CUDA. It does work, but it hasn't been tested thoroughly.
template<typename T>
class LocalVector
{
private:
    T* m_begin;
    T* m_end;
    size_t capacity;
    size_t length;

    __device__ void expand() {
        capacity *= 2;
        size_t tempLength = (m_end - m_begin);
        T* tempBegin = new T[capacity];

        memcpy(tempBegin, m_begin, tempLength * sizeof(T));
        delete[] m_begin;
        m_begin = tempBegin;
        m_end = m_begin + tempLength;
        length = static_cast<size_t>(m_end - m_begin);
    }

public:
    __device__ explicit LocalVector() : capacity(16), length(0) {
        m_begin = new T[capacity];
        m_end = m_begin;
    }

    __device__ T& operator[](unsigned int index) {
        return *(m_begin + index);
    }

    __device__ T* begin() {
        return m_begin;
    }

    __device__ T* end() {
        return m_end;
    }

    __device__ ~LocalVector()
    {
        delete[] m_begin;
        m_begin = nullptr;
    }

    __device__ void add(T t) {
        if (static_cast<size_t>(m_end - m_begin) >= capacity) {
            expand();
        }

        new (m_end) T(t);
        m_end++;
        length++;
    }

    __device__ T pop() {
        // remove and return the last element
        m_end--;
        length--;
        return *m_end;
    }

    __device__ size_t getSize() {
        return length;
    }
};
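A hypothetical usage of this prototype inside a kernel (not part of the original answer). Device-side new/delete draw from the device heap; if the default heap is too small, it can be enlarged from the host with cudaDeviceSetLimit(cudaLimitMallocHeapSize, bytes) before the kernel launch.
#include <cstdio>

__global__ void useLocalVector()
{
    LocalVector<int> v;             // storage comes from the device heap
    for (int i = 0; i < 100; ++i)
        v.add(i);
    printf("size = %u, last = %d\n", (unsigned)v.getSize(), v[99]);
}

// Host side:
//     cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64 * 1024 * 1024);
//     useLocalVector<<<1, 1>>>();
//     cudaDeviceSynchronize();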
You can't use std::vector in device-side code. Why?
It's not marked to allow this
The "formal" reason is that, to use code in your device-side function or kernel, that code itself has to be in a __device__ function; and the code in the standard library, including, std::vector is not. (There's an exception for constexpr code; and in C++20, std::vector does have constexpr methods, but CUDA does not support C++20 at the moment, plus, that constexprness is effectively limited.)
You probably don't really want to
The std::vector class uses allocators to obtain more memory when it needs to grow the storage for the vectors you create or add into. By default (i.e. if you use std::vector<T> for some T) that allocation is on the heap. While this could be adapted to the GPU, it would be quite slow, and incredibly slow if each "CUDA thread" were to dynamically allocate its own memory.
Now, you could say "But I don't want to allocate memory, I just want to read from the vector!" - well, in that case, you don't need a vector per se. Just copy the data to some on-device buffer, and either pass a pointer and a size, or use a CUDA-capable span, like in cuda-kat. Another option, though a bit "heavier", is to use the NVIDIA Thrust library's "device vector" class. Under the hood, it's quite different from the standard library vector though.
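A minimal sketch of the "pointer and size" alternative just mentioned (the kernel and buffer names are made up for illustration): the host keeps its std::vector, and only a raw device buffer plus an element count are handed to the kernel.
#include <vector>
#include <cuda_runtime.h>

__global__ void readOnly(const int *data, size_t n)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        int value = data[i];   // read-only access; no vector needed on the device
        (void)value;           // placeholder for real work
    }
}

int main()
{
    std::vector<int> h(1000, 3);           // host-side vector as usual

    int *d = nullptr;
    cudaMalloc(&d, h.size() * sizeof(int));
    cudaMemcpy(d, h.data(), h.size() * sizeof(int), cudaMemcpyHostToDevice);

    unsigned blocks = (unsigned)((h.size() + 255) / 256);
    readOnly<<<blocks, 256>>>(d, h.size());
    cudaDeviceSynchronize();

    cudaFree(d);
    return 0;
}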