Memory mapping in Octave with mex-functions

I have plain C code (running on Linux) that I would like to use from Octave, so I thought I could write a mex-file to handle the memory mapping and pass the information I receive (or send) back and forth between my Octave script and my sensors. The C code looks like this:
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <poll.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#define CUSTOM_IP_MAP_SIZE 0x10000
#define CUSTOM_IP_BASEADDR 0x43C00000
#define CUSTOM_IP_S00_AXI_SLV_REG0_OFFSET 0
#define CUSTOM_IP_S00_AXI_SLV_REG1_OFFSET 4
int main(void)
{
    uint32_t leds = 0x0;
    int fd = open("/dev/uio0", O_RDWR);
    void *ptr;
    if (fd < 0) {
        perror("open");
        exit(EXIT_FAILURE);
    }
    ptr = mmap(NULL, CUSTOM_IP_MAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    while (1) {
        leds = *((unsigned *)(ptr + CUSTOM_IP_S00_AXI_SLV_REG1_OFFSET)); //Read from the IP (slv_reg1).
        *((unsigned *)(ptr + CUSTOM_IP_S00_AXI_SLV_REG0_OFFSET)) = leds; //Write to the IP (slv_reg0).
    }
    close(fd);
    exit(EXIT_SUCCESS);
}
I compiled the code, with no errors, using the following command:
mkoctfile --mex mmap.c
I get the following error when I run it in Octave:
error: failed to install .mex file function 'mmap'
Should I keep trying to do this with a mex-function, or is there a better option for this?
Thank you for any help.
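For reference: mkoctfile --mex builds a mex file, which Octave enters through a mexFunction rather than main, and the "failed to install .mex file function" error usually means no such entry point was found in the compiled file. Below is a minimal sketch of how the mapping above could be wrapped as a mex file; the device path and register offset are taken from the code in the question, while returning a single scalar and taking no input arguments are just assumptions for illustration:
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include "mex.h"

#define CUSTOM_IP_MAP_SIZE 0x10000
#define CUSTOM_IP_S00_AXI_SLV_REG1_OFFSET 4

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    int fd = open("/dev/uio0", O_RDWR);
    if (fd < 0)
        mexErrMsgTxt("could not open /dev/uio0");

    char *ptr = mmap(NULL, CUSTOM_IP_MAP_SIZE, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) {
        close(fd);
        mexErrMsgTxt("mmap failed");
    }

    /* Read slv_reg1 and hand the value back to the Octave script. */
    uint32_t leds = *(volatile uint32_t *)(ptr + CUSTOM_IP_S00_AXI_SLV_REG1_OFFSET);
    plhs[0] = mxCreateDoubleScalar((double)leds);

    munmap(ptr, CUSTOM_IP_MAP_SIZE);
    close(fd);
}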

Related

Including C standard headers in CUDA NVRTC code

I'm writing a CUDA kernel that is compiled at runtime using NVRTC (CUDA version 9.2 with NVRTC version 7.5), which needs the stdint.h header in order to have the int32_t etc. types.
If I write the kernel source code without the include, it works correctly. For example, the kernel
extern "C" __global__ void f() { ... }
compiles to PTX code in which f is defined as .visible .entry f.
But if the kernel source code is
#include <stdint.h>
extern "C" __global__ void f() { ... }
it reports "A function without execution space annotations (__host__/__device__/__global__) is considered a host function, and host functions are not allowed in JIT mode." (also without extern "C").
Passing -default-device makes the PTX code .visible .func f, so the function cannot be called from the host.
Is there a way to include headers in the source code and still have a __global__ entry function? Or, alternatively, a way to know which integer size convention is used by the NVRTC compiler, so that the int32_t etc. types can be defined manually?
Edit:
Example program that shows the problem:
#include <cstdlib>
#include <string>
#include <vector>
#include <memory>
#include <cassert>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <nvrtc.h>
[[noreturn]] void fail(const std::string& msg, int code) {
    std::cerr << "error: " << msg << " (" << code << ')' << std::endl;
    std::exit(EXIT_FAILURE);
}

std::unique_ptr<char[]> compile_to_ptx(const char* program_source) {
    nvrtcResult rv;

    // create nvrtc program
    nvrtcProgram prog;
    rv = nvrtcCreateProgram(
        &prog,
        program_source,
        "program.cu",
        0,
        nullptr,
        nullptr
    );
    if(rv != NVRTC_SUCCESS) fail("nvrtcCreateProgram", rv);

    // compile nvrtc program
    std::vector<const char*> options = {
        "--gpu-architecture=compute_30"
    };
    //options.push_back("-default-device");
    rv = nvrtcCompileProgram(prog, options.size(), options.data());
    if(rv != NVRTC_SUCCESS) {
        std::size_t log_size;
        rv = nvrtcGetProgramLogSize(prog, &log_size);
        if(rv != NVRTC_SUCCESS) fail("nvrtcGetProgramLogSize", rv);
        auto log = std::make_unique<char[]>(log_size);
        rv = nvrtcGetProgramLog(prog, log.get());
        if(rv != NVRTC_SUCCESS) fail("nvrtcGetProgramLog", rv);
        assert(log[log_size - 1] == '\0');
        std::cerr << "Compile error; log:\n" << log.get() << std::endl;
        fail("nvrtcCompileProgram", rv);
    }

    // get ptx code
    std::size_t ptx_size;
    rv = nvrtcGetPTXSize(prog, &ptx_size);
    if(rv != NVRTC_SUCCESS) fail("nvrtcGetPTXSize", rv);
    auto ptx = std::make_unique<char[]>(ptx_size);
    rv = nvrtcGetPTX(prog, ptx.get());
    if(rv != NVRTC_SUCCESS) fail("nvrtcGetPTX", rv);
    assert(ptx[ptx_size - 1] == '\0');
    nvrtcDestroyProgram(&prog);
    return ptx;
}

const char program_source[] = R"%%%(
//#include <stdint.h>
extern "C" __global__ void f(int* in, int* out) {
    out[threadIdx.x] = in[threadIdx.x];
}
)%%%";

int main() {
    CUresult rv;

    // initialize CUDA
    rv = cuInit(0);
    if(rv != CUDA_SUCCESS) fail("cuInit", rv);

    // compile program to ptx
    auto ptx = compile_to_ptx(program_source);
    std::cout << "PTX code:\n" << ptx.get() << std::endl;
}
When //#include <stdint.h> in the kernel source is uncommented it no longer compiles. When //options.push_back("-default-device"); is uncommented it compiles but does not mark the function f as .entry.
CMakeLists.txt to compile it (needs CUDA driver API + NVRTC)
cmake_minimum_required(VERSION 3.4)
project(cudabug CXX)
find_package(CUDA REQUIRED)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED 14)
add_executable(cudabug cudabug.cc)
include_directories(SYSTEM ${CUDA_INCLUDE_DIRS})
link_directories(${CUDA_LIBRARY_DIRS})
target_link_libraries(cudabug PUBLIC ${CUDA_LIBRARIES} nvrtc cuda)
[Preface: this is a very hacky answer, and is specific to the GNU toolchain (although I suspect the problem in the question is also specific to the GNU toolchain)].
It would appear that the problem here is with the GNU standard header features.h, which gets pulled in by stdint.h and then winds up defining a lot of stub functions which have the default __host__ compilation space. This causes nvrtc to blow up. It also seems that the -default-device option results in a resolved glibc compiler feature set which makes the whole nvrtc compilation fail.
You can defeat this (in a very hacky way) by predefining a feature set for the standard library which excludes all the host functions. Changing your JIT kernel code to
const char program_source[] = R"%%%(
#define __ASSEMBLER__
#define __extension__
#include <stdint.h>
extern "C" __global__ void f(int32_t* in, int32_t* out) {
out[threadIdx.x] = in[threadIdx.x];
}
)%%%";
got me this:
$ nvcc -std=c++14 -ccbin=g++-7 jit_header.cu -o jitheader -lnvrtc -lcuda
$ ./jitheader
PTX code:
//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-24330188
// Cuda compilation tools, release 9.2, V9.2.148
// Based on LLVM 3.4svn
//
.version 6.2
.target sm_30
.address_size 64
// .globl f
.visible .entry f(
.param .u64 f_param_0,
.param .u64 f_param_1
)
{
.reg .b32 %r<3>;
.reg .b64 %rd<8>;
ld.param.u64 %rd1, [f_param_0];
ld.param.u64 %rd2, [f_param_1];
cvta.to.global.u64 %rd3, %rd2;
cvta.to.global.u64 %rd4, %rd1;
mov.u32 %r1, %tid.x;
mul.wide.u32 %rd5, %r1, 4;
add.s64 %rd6, %rd4, %rd5;
ld.global.u32 %r2, [%rd6];
add.s64 %rd7, %rd3, %rd5;
st.global.u32 [%rd7], %r2;
ret;
}
Big caveat: This worked on the glibc system I tried it on. It probably won't work with other toolchains or libc implementations (if, indeed, they have this problem).
Another alternative is creating stand-ins for some of the standard library headers. NVRTC's API lets you specify header file contents as strings, associated with header names, before it goes looking through the filesystem for you. This approach is adopted in NVIDIA JITify, and I've adopted it myself in something else I'm working on, which may or may not be released.
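For illustration, here is a rough sketch of that in-memory header mechanism, using the header-source and header-name parameters of nvrtcCreateProgram (the stub content below is just a placeholder, not a complete stdint.h):
// Sketch: hand NVRTC a stub stdint.h as an in-memory header, so that the
// #include in the kernel source is resolved without touching the filesystem.
const char* stdint_stub =
    "#pragma once\n"
    "typedef signed int int32_t;\n"       // placeholder; use a full stub in practice
    "typedef unsigned int uint32_t;\n";
const char* header_sources[] = { stdint_stub };
const char* header_names[]   = { "stdint.h" };
nvrtcProgram prog;
nvrtcResult rv = nvrtcCreateProgram(
    &prog,
    program_source,      // kernel source containing #include <stdint.h>
    "program.cu",
    1,                   // number of in-memory headers
    header_sources,
    header_names);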
The easy way to do this: you can just take the JITify header stubs for stdint.h and limits.h from here, which I'm also attaching below since they're not very long. Alternatively, you can generate these stubs yourself to make sure you're not missing anything relevant from the standard. Here's how that works:
1. Start with your stdint.h file (or cstdint file, as the case may be).
2. For each include directive in the file (and recursively, for each include within an include, etc.):
2.1 Figure out whether you can skip including the file altogether (possibly by making a few defines which are known to hold on the GPU).
2.2 If you're not sure you can skip the file, include it entirely and recurse to (2.), or keep it as its own separate header (and apply the whole process from (1.) to it).
3. You now have a header file which only includes device-safe header files (or none at all).
4. Partially preprocess the file, dropping everything that won't be used on a GPU.
5. Remove the lines which might be problematic on a GPU (e.g. #pragma's), and add __device__ __host__ or just __device__, as appropriate, to each function declaration.
Important note: Doing this requires paying attention to licenses and copyrights. You would be creating a "derivative work" of glibc and/or JITify and/or StackOverflow contributions etc.
Now, the stdint.h and limits.h stubs from NVIDIA JITify that I promised. I've adapted them not to use namespaces:
stdint.h:
#pragma once
#include <limits.h>
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef signed long long int64_t;
typedef signed char int_fast8_t;
typedef signed short int_fast16_t;
typedef signed int int_fast32_t;
typedef signed long long int_fast64_t;
typedef signed char int_least8_t;
typedef signed short int_least16_t;
typedef signed int int_least32_t;
typedef signed long long int_least64_t;
typedef signed long long intmax_t;
typedef signed long intptr_t; //optional
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;
typedef unsigned char uint_fast8_t;
typedef unsigned short uint_fast16_t;
typedef unsigned int uint_fast32_t;
typedef unsigned long long uint_fast64_t;
typedef unsigned char uint_least8_t;
typedef unsigned short uint_least16_t;
typedef unsigned int uint_least32_t;
typedef unsigned long long uint_least64_t;
typedef unsigned long long uintmax_t;
#define INT8_MIN SCHAR_MIN
#define INT16_MIN SHRT_MIN
#if defined _WIN32 || defined _WIN64
#define WCHAR_MIN SHRT_MIN
#define WCHAR_MAX SHRT_MAX
typedef unsigned long long uintptr_t; //optional
#else
#define WCHAR_MIN INT_MIN
#define WCHAR_MAX INT_MAX
typedef unsigned long uintptr_t; //optional
#endif
#define INT32_MIN INT_MIN
#define INT64_MIN LLONG_MIN
#define INT8_MAX SCHAR_MAX
#define INT16_MAX SHRT_MAX
#define INT32_MAX INT_MAX
#define INT64_MAX LLONG_MAX
#define UINT8_MAX UCHAR_MAX
#define UINT16_MAX USHRT_MAX
#define UINT32_MAX UINT_MAX
#define UINT64_MAX ULLONG_MAX
#define INTPTR_MIN LONG_MIN
#define INTMAX_MIN LLONG_MIN
#define INTPTR_MAX LONG_MAX
#define INTMAX_MAX LLONG_MAX
#define UINTPTR_MAX ULONG_MAX
#define UINTMAX_MAX ULLONG_MAX
#define PTRDIFF_MIN INTPTR_MIN
#define PTRDIFF_MAX INTPTR_MAX
#define SIZE_MAX UINT64_MAX
limits.h:
#pragma once
#if defined _WIN32 || defined _WIN64
#define __WORDSIZE 32
#else
#if defined __x86_64__ && !defined __ILP32__
#define __WORDSIZE 64
#else
#define __WORDSIZE 32
#endif
#endif
#define MB_LEN_MAX 16
#define CHAR_BIT 8
#define SCHAR_MIN (-128)
#define SCHAR_MAX 127
#define UCHAR_MAX 255
enum {
_JITIFY_CHAR_IS_UNSIGNED = (char)-1 >= 0,
CHAR_MIN = _JITIFY_CHAR_IS_UNSIGNED ? 0 : SCHAR_MIN,
CHAR_MAX = _JITIFY_CHAR_IS_UNSIGNED ? UCHAR_MAX : SCHAR_MAX,
};
#define SHRT_MIN (-32768)
#define SHRT_MAX 32767
#define USHRT_MAX 65535
#define INT_MIN (-INT_MAX - 1)
#define INT_MAX 2147483647
#define UINT_MAX 4294967295U
#if __WORDSIZE == 64
# define LONG_MAX 9223372036854775807L
#else
# define LONG_MAX 2147483647L
#endif
#define LONG_MIN (-LONG_MAX - 1L)
#if __WORDSIZE == 64
#define ULONG_MAX 18446744073709551615UL
#else
#define ULONG_MAX 4294967295UL
#endif
#define LLONG_MAX 9223372036854775807LL
#define LLONG_MIN (-LLONG_MAX - 1LL)
#define ULLONG_MAX 18446744073709551615ULL

My C code won't compile against C code in a `.cu` file

I have 2 files that form a small CUDA library from my previous program (which works well, btw), written in C++.
The header for this library is:
#ifndef __cudaLU__
#define __cudaLU__
#include <assert.h>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <cusolverSp.h>
#include <cusparse.h>
#include <cuComplex.h>
#include <stdlib.h>
void denseLS(int dim,
std::complex<float> * A,
std::complex<float> * b );
void sparseLS(int dim,
std::complex<float> *csrVal,
int *csrRowPtr,
int *csrColInd,
std::complex<float> *vecVal);
#endif
And I want to use this library in my old-as-the-hills C program just by declaring the procedure at the top of my main.c file:
extern void denseLS(int dim, float complex *A, float complex *b);
And it fails with a bunch of similar errors. A few of them are:
..NA/cudaLS.cu(115): error: namespace "std" has no member "complex"
..NA/cudaLS.cu(115): error: expected a ")"
..NA/cudaLS.cu(137): error: identifier "csrRowPtr" is undefined
..NA/cudaLS.cu(169): error: identifier "csrColInd" is undefined
..NA/cudaLS.cu(170): error: identifier "csrVal" is undefined
..NA/cudaLS.cu(171): error: identifier "vecVal" is undefined
I tried making the change std::complex -> float complex but nothing works. Still the same errors (without the std error, of course).
The CMake instructions file:
cmake_minimum_required(VERSION 3.8)
project(NA)
set(CMAKE_C_STANDARD 11)
find_package(GSL REQUIRED)
find_package(CUDA REQUIRED)
include_directories("${CUDA_INCLUDE_DIRS}")
cuda_add_library(solvers STATIC
cudaLS.cu
cudaLS.h)
target_link_libraries(solvers ${CUDA_LIBRARIES} ${CUDA_cusparse_LIBRARY} ${CUDA_cusolver_LIBRARY})
target_compile_features(solvers PUBLIC cxx_std_11)
set_target_properties( solvers
PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
add_executable(NA main.c)
set_target_properties(NA PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
target_link_libraries(NA PRIVATE GSL::gsl m solvers)
What am I doing wrong pals?
UPD:
g++/gcc - 7.3
Linux
Well, I found out exactly what I was doing wrong.
CMake is OK, but the declaration in the .h file has to be modified to
extern "C" void denseLS(int dim, cuComplex *A, cuComplex *b );
The CUDA functions have to be declared at the top of the .c file (or in a separate .h file) as
void denseLS(int dim, float complex *A, float complex *b);
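For reference, a common way to keep a single header usable from both the .cu and the .c side is to switch on __cplusplus. This is only a sketch, and it assumes that cuComplex and the C99 float complex type share the same two-float layout (the usual case, but worth verifying on your platform):
/* cudaLS.h - sketch of one header serving both languages */
#ifndef __cudaLU__
#define __cudaLU__

#ifdef __cplusplus
  #include <cuComplex.h>
  extern "C" void denseLS(int dim, cuComplex *A, cuComplex *b);
#else
  #include <complex.h>
  void denseLS(int dim, float complex *A, float complex *b);
#endif

#endif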

Rank of each element in a matrix row using CUDA

Is there any way to find the rank of each element in a matrix row using CUDA, or any function provided by NVIDIA for this?
I don't know of a built-in ranking or argsort function in CUDA or any of the libraries I am familiar with.
You could certainly build such a function out of lower-level operations using thrust for example.
Here is a (non-optimized) outline of a possible solution approach using thrust:
$ cat t84.cu
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <thrust/adjacent_difference.h>
#include <thrust/transform.h>
#include <thrust/iterator/permutation_iterator.h>
#include <iostream>
typedef int mytype;
struct clamp
{
    template <typename T>
    __host__ __device__
    T operator()(T data){
        if (data == 0) return 0;
        return 1;
    }
};

int main(){
    mytype data[] = {4,1,7,1};
    int dsize = sizeof(data)/sizeof(data[0]);
    thrust::device_vector<mytype> d_data(data, data+dsize);
    thrust::device_vector<int> d_idx(dsize);
    thrust::device_vector<int> d_result(dsize);
    thrust::sequence(d_idx.begin(), d_idx.end());
    thrust::sort_by_key(d_data.begin(), d_data.end(), d_idx.begin(), thrust::less<mytype>());
    thrust::device_vector<int> d_diff(dsize);
    thrust::adjacent_difference(d_data.begin(), d_data.end(), d_diff.begin());
    d_diff[0] = 0;
    thrust::transform(d_diff.begin(), d_diff.end(), d_diff.begin(), clamp());
    thrust::inclusive_scan(d_diff.begin(), d_diff.end(), d_diff.begin());
    thrust::copy(d_diff.begin(), d_diff.end(), thrust::make_permutation_iterator(d_result.begin(), d_idx.begin()));
    thrust::copy(d_result.begin(), d_result.end(), std::ostream_iterator<int>(std::cout, ","));
    std::cout << std::endl;
}
$ nvcc -arch=sm_61 -o t84 t84.cu
$ ./t84
1,0,2,0,
$
In CUDA, the concept of rank is not the same as in other frameworks such as OpenMP or MPI. In that case you will need to work inside a __global__ block of code, where you work with the threadIdx.x and blockIdx.x parameters.
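For example, a kernel typically derives a per-thread global index from those built-ins; here is a generic sketch (not tied to the ranking problem above):
__global__ void per_element(const int *in, int *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // global element index
    if (i < n)
        out[i] = in[i];                             // each thread handles one element
}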

libcaffe error in cpp eclipse

I just want to use C++ to read LevelDB features extracted by Caffe.
I use the following code in Eclipse:
// Copyright 2014 BVLC and contributors.
#include <glog/logging.h>
#include <stdio.h> // for snprintf
#include <google/protobuf/text_format.h>
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
#include <string>
#include <vector>
#include <cassert>
#include <iostream>
#include <map>
//#include "cpp/sample.pb.h"
#include "caffe/proto/caffe.pb.h" // for: Datum
using namespace caffe;
#define NUMBER_FEATURES_PER_IMAGE 16
using namespace std;
int main(int argc, char** argv)
{
    //google::InitGoogleLogging(argv[0]);
    if (argc < 2)
    {
        printf("ERROR! Not enough arguments!\nusage: %s <feature_folder>", argv[0]);
        exit(1);
    }

    LOG(INFO) << "Creating leveldb object\n";
    leveldb::DB* db;
    leveldb::Options options;
    options.create_if_missing = true;
    leveldb::Status status = leveldb::DB::Open(options, argv[1], &db);
    assert(status.ok());

    leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
    int i = 0;
    double count = 0.0f;
    for (it->SeekToFirst(); it->Valid(); it->Next())
    {
        Datum d;
        d.clear_float_data();
        d.clear_data();
        d.ParseFromString(it->value().ToString());
        for (int j = 0; j < d.height(); ++j)
            count += d.float_data(j);
        i++;
    }
    assert(it->status().ok());

    LOG(INFO) << "Number of datums (or feature vectors): " << i << "\n";
    LOG(INFO) << "Reduction of All Vectors to A Scalar Value: " << count << "\n";
    delete it;
}
It builds without error, but when I run it, it says:
/home/deep/cuda-workspace/ReadLevelDB/Debug/ReadLevelDB: error while loading shared libraries: libcaffe.so.1.0.0-rc3: cannot open shared object file: No such file or directory
What is the problem?
Your program fails to find the *.so file. There are three methods:
1. Create links to the *.so files in /usr/lib:
ln -s /where/you/install/lib/*.so /usr/lib
sudo ldconfig
2. Modify LD_LIBRARY_PATH:
export LD_LIBRARY_PATH=/where/you/install/lib:$LD_LIBRARY_PATH
sudo ldconfig
3. Modify /etc/ld.so.conf:
vim /etc/ld.so.conf
add /where/you/install/lib
sudo ldconfig

cuda Texture declaration compile-time error

I'm trying to compile the following piece of code:
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
texture<float, 2, cudaReadModeElementType> tex;
int main () { ... }
yet, nvcc gives me the following error:
main.c:6:8: error: expected ‘=’, ‘,’, ‘;’, ‘asm’ or ‘__attribute__’ before ‘<’ token
I'm pretty new to CUDA, so I suppose I'm missing something here.
You can only use CUDA syntax in .cu files.
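In practice that means either renaming main.c to main.cu, or forcing nvcc to treat the file as CUDA source with the -x cu option; for example (command lines are illustrative only):
$ mv main.c main.cu
$ nvcc main.cu -o main
or, keeping the .c extension:
$ nvcc -x cu main.c -o main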