How to fix error C4703 in SWIG & Python?

I made a C++ class as follows:
#include <string>
#include <vector>
#include <utility> // std::pair
namespace Test
{
typedef std::pair<int, int> Values;
class Configuration
{
public:
Configuration(std::string);
Configuration(const Configuration&);
~Configuration();
void setMinVal(int);
void setMaxVal(int);
void setVals(Values);
std::string getPath();
int getMinVal();
int getMaxVal();
Values getVals();
private:
std::string path;
int minVal;
int maxVal;
};
} // namespace Test
I made a SWIG interface file as follows:
%module Test
%include <std_string.i>
%include <std_vector.i>
%include <std_pair.i>
%{
#include "Test.h"
%}
%template(Values) std::pair<int, int>;
namespace Test {
class Configuration
{
public:
Configuration(std::string);
~Configuration();
void setMinVal(int);
void setMaxVal(int);
void setVals(std::pair<int, int>);
std::string getPath();
int getMinVal();
int getMaxVal();
std::pair<int, int> getVals();
};
}
I run SWIG from the command line and compile in Visual Studio, but I get
error C4703: potentially uninitialized local pointer variable 'p' used
in these generated wrapper lines:
value_type *p;
swig_type_info *descriptor = swig::type_info<value_type>();
res = descriptor ? SWIG_ConvertPtr(obj, (void **)&p, descriptor, 0) : SWIG_ERROR;
if (SWIG_IsOK(res) && val) *val = p;
The error is reported for the fourth line.
How can I fix it?
Thanks.
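For what it's worth, one possible workaround (not taken from an answer in this thread): C4703 is an MSVC warning that the /sdl compiler option promotes to an error, and it is raised inside SWIG-generated code you don't control. You can either drop /sdl for the wrapper file or inject a pragma at the top of the generated wrapper through SWIG's %begin section, roughly like this:
%begin %{
/* Workaround sketch: silence MSVC's C4703 (promoted to an error by /sdl)
   for the generated wrapper file only. */
#ifdef _MSC_VER
#pragma warning(disable : 4703)
#endif
%}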

Related

Use member function as template argument to create a static wrapper

I'm trying to write a C++11 wrapper around a C API. The API lets you register notifications with a static function pointer together with an "opaque" pointer that is handed back to you later; I use that opaque pointer to carry a pointer to classes I create, in this example the class foo. Essentially, I'm trying to create a static function helper<...>::call that has the API's signature but calls my member function on the instance the C++ wrapper created, which is passed back through the opaque pointer; this static function also converts the arguments before finally calling the member function.
I almost have this working, but I'm having trouble creating a "nicer" public function, register_handler in this example, which hides the "uglier" internals. This is the error I'm getting:
test.cpp:154:37: error: no matching function for call to ‘register_handler<&foo::bar>(const char [6])’
154 | register_handler<&foo::bar>("test2"); // <-- trying to wrap it into a function so I can use only one template argument
| ^
test.cpp:137:6: note: candidate: ‘template<class T, class R, class ... Args, R (T::* Func)(Args ...)> void register_handler(const char*)’
137 | void register_handler(const char* name)
| ^~~~~~~~~~~~~~~~
This is my test code:
#include <iostream>
#include <memory>
#include <vector>
#include <map>
#include <cassert>
// inspired by https://stackoverflow.com/a/7943765/2129246
template <typename T>
struct func_traits:
public func_traits<decltype(&T::operator())>
{
};
template <typename R, typename... Args>
struct func_traits<R(*)(Args...)>
{
enum { arity = sizeof...(Args) };
typedef R result_type;
using all_args = std::tuple<Args...>;
template <size_t i>
struct arg
{
typedef typename std::tuple_element<i, std::tuple<Args...>>::type type;
};
};
template <typename C, typename R, typename... Args>
struct func_traits<R(C::*)(Args...) const>
{
enum { arity = sizeof...(Args) };
typedef C class_type;
typedef R result_type;
using all_args = std::tuple<Args...>;
template <size_t i>
struct arg
{
typedef typename std::tuple_element<i, std::tuple<Args...>>::type type;
};
};
template< std::size_t... Ns >
struct indices {
typedef indices< Ns..., sizeof...( Ns ) > next;
};
template< std::size_t N >
struct make_indices {
typedef typename make_indices< N - 1 >::type::next type;
};
template<>
struct make_indices< 0 > {
typedef indices<> type;
};
struct value
{
std::string str_;
template <typename T>
value(T val):
str_(std::to_string(val))
{
}
value(const char* str):
str_(str)
{
}
value(const std::string& str):
str_(str)
{
}
operator int() const
{
return std::stoi(str_);
}
operator double() const
{
return std::stof(str_);
}
operator std::string() const
{
return str_;
}
};
std::map<std::string, void(*)(void*, const std::vector<value>&)> g_handlers;
template <typename T, T>
struct helper;
template <typename T, typename R, typename... Args, R(T::*Func)(Args...)>
struct helper<R(T::*)(Args...), Func>
{
template <size_t... Is>
static void expand(T* obj, const std::vector<value>& args, indices<Is...>)
{
assert(sizeof...(Is) <= args.size());
(obj->*Func)((args[Is])...);
}
static void call(void *p, const std::vector<value>& args)
{
T* obj = reinterpret_cast<T*>(p);
expand(obj, args, typename make_indices<sizeof...(Args)>::type());
}
static void reg_handler(const char* name)
{
g_handlers.insert(std::make_pair(name, call));
};
};
template <typename Obj>
void call_handler(Obj& obj, const char* name, const std::vector<value>& args)
{
auto it = g_handlers.find(name);
if (it != g_handlers.end())
it->second(reinterpret_cast<void*>(&obj), args);
else
std::cout << "handler not registered: " << name << std::endl;
}
// The code below somehow doesn't ever match this template
template <typename T, typename R, typename... Args, R(T::*Func)(Args...)>
void register_handler(const char* name)
{
helper<R(T::*)(Args...), Func>::reg_handler(name);
}
struct foo
{
void bar(int v, const std::string& str, double f)
{
std::cout << "bar: v=" << v << " str=" << str << " f=" << f << std::endl;
};
};
int main()
{
// register member function handlers before we have any instances
helper<decltype(&foo::bar), &foo::bar>::reg_handler("test"); // <-- works, but "ugly" and exposes internal implementation
register_handler<&foo::bar>("test2"); // <-- trying to wrap it into a function so I can use only one template argument
// now we have an instance
foo f;
// call the previously registered handler
call_handler(f, "test", {1, "2", 3.45});
call_handler(f, "test2", {1, "2", 3.45});
return 0;
}
The simple answer for C++11 is: you can't!
From C++17 on you can use auto also for non-type template parameters; the function pointer or member function pointer is not a type here, and in C++11 there is no syntax that lets you describe its type from a single template argument.
In C++17 you can use it like this:
struct foo
{
void bar(){}
};
template <typename T, T>
struct helper;
template <typename T, typename R, typename... Args, R(T::*Func)(Args...)>
struct helper<R(T::*)(Args...), Func>
{
static void reg_handler(const char* name)
{
// ... here your code continues
}
};
template < auto T >
struct X
{
};
template <typename T, typename R, typename... Args, R(T::*Func)(Args...)>
struct X<Func>
{
static void register_handler( const char* name )
{
helper<R(T::*)(Args...), Func>::reg_handler(name);
}
};
int main()
{
X<&foo::bar>::register_handler("check");
}
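A slightly shorter variant of the same C++17 idea (a sketch, not part of the original answer) skips the extra class template and recovers the pointer type with decltype:
template <auto Func>
void register_handler(const char* name)
{
    // decltype(Func) yields the member-function-pointer type that the
    // helper<T, T> primary template expects as its first argument.
    helper<decltype(Func), Func>::reg_handler(name);
}

int main()
{
    register_handler<&foo::bar>("check"); // single template argument, as the question wanted
}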

Template __host__ __device__ calling host defined functions

During implementation of CUDA code I often need some utility functions which will be called from device and also from host code, so I declare these functions as __host__ __device__. This is OK, and possible device/host incompatibilities can be handled with #ifdef __CUDA_ARCH__.
Problems arise when the utility function is templated, i.e. by some functor type. If the template instance calls a __host__ function, I get this warning:
calling a __host__ function from a __host__ __device__ function is not allowed
detected during instantiation of "int foo(const T &) [with T=HostObject]"
The only solution I know of is to define the function twice, once for device and once for host code, with different names (I cannot overload on __host__ __device__). But this means code duplication, and every other __host__ __device__ function that calls it must also be defined twice (even more code duplication).
Simplified example:
#include <cuda.h>
#include <iostream>
struct HostObject {
__host__
int value() const { return 42; }
};
struct DeviceObject {
__device__
int value() const { return 3; }
};
template <typename T>
__host__ __device__
int foo(const T &obj) {
return obj.value();
}
/*
template <typename T>
__host__
int foo_host(const T &obj) {
return obj.value();
}
template <typename T>
__device__
int foo_device(const T &obj) {
return obj.value();
}
*/
__global__ void kernel(int *data) {
data[threadIdx.x] = foo(DeviceObject());
}
int main() {
foo(HostObject());
int *data;
cudaMalloc((void**)&data, sizeof(int) * 64);
kernel<<<1, 64>>>(data);
cudaThreadSynchronize();
cudaFree(data);
}
The warning is caused by the foo(HostObject()); call inside the main() function.
foo_host<> and foo_device<> are possible replacements for the problematic foo<>.
Is there a better solution? Can I prevent instantiation of foo() on the device side?
You cannot prevent instantiation of either half of a __host__ __device__ function template instantiation. If you instantiate the function by calling it on the host (device), the compiler will also instantiate the device (host) half.
The best you can do for your use case as of CUDA 7.0 is to suppress the warning using #pragma hd_warning_disable as in the following example and ensure that the function is not called incorrectly.
#include <iostream>
#include <cstdio>
#pragma hd_warning_disable
template<class Function>
__host__ __device__
void invoke(Function f)
{
f();
}
struct host_only
{
__host__
void operator()()
{
std::cout << "host_only()" << std::endl;
}
};
struct device_only
{
__device__
void operator()()
{
printf("device_only(): thread %d\n", threadIdx.x);
}
};
__global__
void kernel()
{
// use from device with device functor
invoke(device_only());
// XXX error
// invoke(host_only());
}
int main()
{
// use from host with host functor
invoke(host_only());
kernel<<<1,1>>>();
cudaDeviceSynchronize();
// XXX error
// invoke(device_only());
return 0;
}
I was struggling with the same problem and found half of a solution: one can "overload" the host and device functions by adding a dummy template parameter to them.
In device code the __device__ "overload" of f is called; in host code the __host__ "overload" of f is called.
Unfortunately, this turns f into a template function. In particular, for constructors this can cause big problems (which I am still struggling with).
#include <type_traits>
#include <cstdio>
#ifndef __CUDA_ARCH__
static constexpr bool in_cuda_code = false;
#else
static constexpr bool in_cuda_code = true;
#endif
__device__ void g_device() { printf( "device\n" ); };
__host__ void g_host() { printf( "host\n" ); };
template< bool b = in_cuda_code > void f();
template<> __device__ void f<true>() { g_device(); }
template<> __host__ void f<false>() { g_host(); }
__global__ void kernel () {
f();
}
int main() {
f();
kernel<<<1,1>>>();
cudaDeviceSynchronize();
}

How to use make_transform_iterator() with counting_iterator<> and execution_policy in Thrust?

I'm trying to compile this code with MSVS 2012, CUDA 5.5, and Thrust 1.7:
#include <iostream>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
struct is_odd {
__host__ __device__ bool operator()(uint64_t &x) {
return x & 1;
}
};
int main() {
thrust::counting_iterator<uint64_t> first(0);
thrust::counting_iterator<uint64_t> last = first + 100;
auto iter = thrust::find(thrust::device,
thrust::make_transform_iterator(first, is_odd()),
thrust::make_transform_iterator(last, is_odd()),
true);
int bbb; std::cin >> bbb;
return 0;
}
and get an error:
Error 1 error : incomplete type is not allowed C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v5.5\include\thrust\detail\type_traits.h 413 1 HostDevice
If I use host_vector/device_vector instead of counting_iterator, everything is fine. What's wrong?
I changed your functor definition slightly, from this:
struct is_odd {
__host__ __device__ bool operator()(uint64_t &x) {
to this:
struct is_odd : public thrust::unary_function<uint64_t, bool> {
__host__ __device__ bool operator()(const uint64_t &x) {
and it compiled for me.
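For reference, here is the question's program with that change applied (a sketch: it also adds <stdint.h> and <thrust/functional.h> includes and prints the result instead of waiting for input):
#include <iostream>
#include <stdint.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/find.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h> // thrust::unary_function

// Deriving from thrust::unary_function supplies the nested result_type
// that transform_iterator needs on a pre-C++11 compiler like MSVS 2012.
struct is_odd : public thrust::unary_function<uint64_t, bool> {
    __host__ __device__ bool operator()(const uint64_t &x) {
        return x & 1;
    }
};

int main() {
    thrust::counting_iterator<uint64_t> first(0);
    thrust::counting_iterator<uint64_t> last = first + 100;
    // find the first element whose transformed value is true, i.e. the first odd number
    auto iter = thrust::find(thrust::device,
                             thrust::make_transform_iterator(first, is_odd()),
                             thrust::make_transform_iterator(last, is_odd()),
                             true);
    std::cout << "first odd value at offset " << (iter.base() - first) << std::endl;
    return 0;
}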

How to assign a "const void*" to a "const uint64_t*" in CUDA C?

I want to assign a "const void*" to a "const uint64_t*" in CUDA C.
I have done it like this:
void func(const void *buffer)
{
const uint64_t *words = buffer;
}
but I'm getting an error like this:
error: a value of type "const void *" cannot be used to initialize an
entity of type "const uint64_t *"
Can anyone help me solve this problem?
As @sharptooth indicated, this fixed it for me:
#include <stdio.h>
#include <stdint.h>
void func(const void *buffer)
{
const uint64_t *words = (const uint64_t *) buffer;
}
int main(){
void *my_buf=0;
func(my_buf);
return 0;
}
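Side note, not from the original answer: since this is compiled as C++ rather than C, a static_cast expresses the same conversion a bit more idiomatically:
const uint64_t *words = static_cast<const uint64_t *>(buffer);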

How to advance an iterator in a Thrust function

I'm doing some study on Thrust, but I don't understand how to get the value an iterator points to.
Example code:
#include <thrust/for_each.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <vector>
using namespace std;
class ADD
{
private:
typedef typename thrust::device_vector<int>::iterator PTR;
public:
ADD(){}
~ADD(){}
void setPtr(PTR &ptr)
{this->ptr=ptr;}
__host__ __device__
void operator()(int &x)
{
// note that using printf in a __device__ function requires
// code compiled for a GPU with compute capability 2.0 or
// higher (nvcc --arch=sm_20)
x+=add();
}
__host__ __device__
int add()
{return *ptr++;}
private:
PTR ptr;
};
int main()
{
thrust::device_vector<int> d_vec(3);
d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
thrust::device_vector<int>::iterator itr=d_vec.begin();
ADD *addtest=new ADD();
addtest->setPtr(itr);
thrust::for_each(d_vec.begin(), d_vec.end(), *addtest);
for(int i=0;i<3;i++)
cout<<d_vec[i]<<endl;
return 0;
}
When I compile this using nvcc -arch=sm_20 test.cu, I get the following warning:
test.cu(28): warning: calling a host function("thrust::experimental::iterator_facade<thrust::detail::normal_iterator<thrust::device_ptr<int> > , thrust::device_ptr<int> , int, thrust::detail::cuda_device_space_tag, thrust::random_access_traversal_tag, thrust::device_reference<int> , long> ::operator *") from a __device__/__global__ function("printf_functor::add") is not allowed
test.cu(28): warning: calling a host function("thrust::experimental::iterator_facade<thrust::detail::normal_iterator<thrust::device_ptr<int> > , thrust::device_ptr<int> , int, thrust::detail::cuda_device_space_tag, thrust::random_access_traversal_tag, thrust::device_reference<int> , long> ::operator *") from a __device__/__global__ function("printf_functor::add") is not allowed
I cannot get this to compile. How can I solve this problem?
@Gang.Wang: I think you are just mixing up two different things: all the STL-like functionality, including for_each, device_vector, iterators, etc., is just a "facade" that exists on the host only.
operator(), on the other hand, contains the actual GPU code; it is compiled into a CUDA kernel and applied to each element of your vector in parallel. Hence device_vector iterators are not accessible from your functor.
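A sketch of one way to get a similar element-wise effect without dereferencing device_vector iterators inside the functor (an illustration, not code from the answer) is to let Thrust do the pairing itself with thrust::transform:
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <iostream>

int main()
{
    thrust::device_vector<int> d_vec(3);
    d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;

    // d_vec[i] = d_vec[i] + d_vec[i], computed entirely on the device;
    // no iterator is touched inside user-written device code.
    thrust::transform(d_vec.begin(), d_vec.end(),   // first input range
                      d_vec.begin(),                // second input range
                      d_vec.begin(),                // output range
                      thrust::plus<int>());

    for (int i = 0; i < 3; i++)
        std::cout << d_vec[i] << std::endl;         // prints 0, 2, 4
    return 0;
}
More generally, if a functor really needs to read from another vector, the usual pattern is to store a raw device pointer obtained with thrust::raw_pointer_cast(d_vec.data()) in the functor and index it with a value passed in (for example from a counting_iterator), rather than storing a device_vector iterator.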