sscanf not working properly on MIPS platform

The following code works correctly on x86 but not on MIPS.
char *str = "11111111-22222222 r-xp 00000000 00:0e 1843624 /lib/libdl.so.0";
unsigned long long start_addr, stop_addr, offset;
char* access = NULL;
char* filename = NULL;
sscanf(str, "%llx-%llx %m[-rwxp] %llx %*[:0-9a-f] %*d %ms",
&start_addr, &stop_addr, &access, &offset, &filename);
printf("\n start : %x, stop : %x, offset : %x\n",start_addr,stop_addr,offset);
printf("\n Permission : %s\n",access);
printf("\n Filename : %s\n",filename);
On x86 it outputs:
start : 11111111, stop : 22222222, offset : 0
Permission : r-xp
Filename : /lib/libdl.so.0
But on MIPS it shows:
start : 7ff20f5b, stop : 11111111, offset : 0
Permission : (null)
Filename : (null)
I used the mipsel-linux-uclibc toolchain to compile. Can somebody help?

The C code generates a segmentation fault on SunOS as well, so it works on Linux/x86 but not necessarily elsewhere, including RISC architectures like MIPS. You should investigate why using the gdb debugger: see man gdb, then run the program under gdb and set breakpoints around the sscanf call. I will leave that as an exercise.
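Two things are worth checking before reaching for gdb (a hedged sketch, not a definitive diagnosis). First, printing unsigned long long values with %x is undefined behavior and can desynchronize the variadic arguments on MIPS even where it happens to produce plausible output on x86; use %llx. Second, the %m allocation modifier is POSIX.1-2008, and if your uClibc build does not support it the conversion can stop early and leave access and filename NULL, so check sscanf's return value:
/* hedged sketch: count converted fields and match the printf specifiers */
#include <stdio.h>
int main(void)
{
    const char *str = "11111111-22222222 r-xp 00000000 00:0e 1843624 /lib/libdl.so.0";
    unsigned long long start_addr, stop_addr, offset;
    char *access = NULL, *filename = NULL;
    int n = sscanf(str, "%llx-%llx %m[-rwxp] %llx %*[:0-9a-f] %*d %ms",
                   &start_addr, &stop_addr, &access, &offset, &filename);
    printf("converted %d of 5 fields\n", n);  /* fewer than 5 points at the %m conversions */
    printf("start : %llx, stop : %llx, offset : %llx\n", start_addr, stop_addr, offset);
    return 0;
}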


Texgenpack Wrapper - Compilation Problems

I'm working on a wrapper for texgenpack for my AssetStudio python port.
The goal of the wrapper is a conversion of texture types into a format that PIL can use.
Atm, I simply want to save the original texture as a file, then read it via texgenpack, convert it and feed the result to PIL.
(Later on the file r/w will be replaced by passing bytes.)
When I try to use
def decompress(self, dst_filetype : FileType = FileType.PNG):
    # init
    cdef Image *image = <Image *> malloc(sizeof(Image))
    src = tempfile.NamedTemporaryFile(suffix=self.filetype.name.lower(), delete=True)
    dst = tempfile.NamedTemporaryFile(suffix=dst_filetype.name.lower(), delete=True)
    # write image data to tempfile
    src.write(self.data)
    # load temp file as texture -> image
    load_image(<const char *>src.name, <int> self.filetype, *image)
    # save image as png
    save_image(*image, <const char> *dst.name, <int> dst_filetype)
I get the error
#save image as png
save_image(*image, <const char> *dst.name, <int> dst_filetype)
^
------------------------------------------------------------
texgenpack.pyx:57:34: Expected an identifier or literal
I don't understand why the error shows up there, but not at load_image.
I tried multiple things, but pretty much all of them ended up in this error.
Since I mainly want to use it to convert textures, I tried to circumvent the problem by making a C function which does the load/save itself:
void convert_stexture_to_simage(const char *filename, int filetype, const char *dstname) {
    Image image;
    load_image(filename, filetype, &image);
    save_image(&image, dstname, FILE_TYPE_PNG);
}
in image.c and added it to the header.
When I try to use this function via
convert_stexture_to_simage(<const char *>src.name, <int> self.filetype,<const char *>dst.name)
the following error is produced
texgenpack.obj : error LNK2001: unresolved external symbol "void __cdecl convert_stexture_to_simage(char const *,int,char const *)" (?convert_stexture_to_simage@@YAXPEBDH0@Z)
build\lib.win-amd64-3.7\texgenpack_py.cp37-win_amd64.pyd : fatal error LNK1120: 1 unresolved externals
I hope that one of you can tell me how one of these two problems can be solved.
Edit
Image is defined as
typedef struct {
    unsigned int *pixels;
    int width;
    int height;
    int extended_width;
    int extended_height;
    int alpha_bits; // 0 for no alpha, 1 if alpha is limited to 0 and 0xFF, 8 otherwise.
    int nu_components; // Indicates the number of components.
    int bits_per_component; // 8 or 16.
    int is_signed; // 1 if the components are signed, 0 if unsigned.
    int srgb; // Whether the image is stored in sRGB format.
    int is_half_float; // The image pixels are combinations of half-floats. The pixel size is 64-bit.
} Image;
in the .pyx as
ctypedef struct Image:
    unsigned int* pixels
    int width
    int height
    int extended_width
    int extended_height
    int alpha_bits # 0 for no alpha, 1 if alpha is limited to 0 and 0xFF, 8 otherwise.
    int nu_components # Indicates the number of components.
    int bits_per_component # 8 or 16.
    int is_signed # 1 if the components are signed, 0 if unsigned.
    int srgb # Whether the image is stored in sRGB format.
    int is_half_float # The image pixels are combinations of half-floats. The pixel size is 64-bit.
based on the 2nd answer of this question
The complete code can be found here.
I was able to solve the problem, and it was actually pretty laughable. The problem was that the C source wasn't compiled, so the function references in the header files couldn't be linked to any actual functions.
setup.py
import os
from setuptools import Extension, setup

try:
    from Cython.Build import cythonize
except ImportError:
    cythonize = None

def ALL_C(folder, exclude = []):
    return [
        '/'.join([folder, f])
        for f in os.listdir(folder)
        if f[-2:] == '.c' and f not in exclude
    ]

extensions = [
    Extension(
        name = "texgenpy",
        sources = [
            "texgen.pyx",
            *ALL_C('texgenpack'),
        ],
        #language = "c++",
        include_dirs = [
            "texgenpack",
            "libfgen",
        ],
    )
]

if cythonize:
    extensions = cythonize(extensions)

setup(ext_modules = extensions)
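With the texgenpack C files included in sources, the extension compiles and links them together with the Cython module. Assuming a standard setuptools layout, a typical build invocation would be:
python setup.py build_ext --inplace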
Correct Cython code to load the image via texgenpack:
cdef class TTexture:
    cdef Image image

    def __init__(self, srcfile : str, filetype : int = -1):
        if filetype == -1:
            filetype = KNOWN_FILE_TYPES.get(srcfile.rsplit('.')[1].upper(), 0x000)
        self.load(srcfile, filetype)

    def load(self, srcfile, filetype):
        # convert filepath to const char
        src_b = (u"%s" % srcfile).encode('ascii')
        cdef const char *src = src_b
        load_image(src, <int> filetype, &self.image)
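As an aside on the original error (my reading of it, so treat it as an assumption): Cython has no C-style *ptr dereference. load_image(..., *image) still parsed because *image in a call position looks like Python argument unpacking, whereas <const char> *dst.name after a cast cannot be parsed at all, which is why the compiler only complained at the save_image line. The valid spellings are &var for the address of a struct variable, as used above, or ptr[0] in place of C's *ptr.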
Conversion to a Pillow image:
@property
def image(self) -> PILImage:
    # prepare tmp image in case of required conversion
    cdef Image tmp_image
    clone_image(&self.image, &tmp_image)
    # convert image type
    if tmp_image.is_half_float:
        convert_image_from_half_float(&tmp_image, 0, 1.0, 1.0)
    elif tmp_image.bits_per_component != 8:
        print("Error -- cannot write PNG file with non 8-bit components.\n")
        return None
    if tmp_image.nu_components == 1: # grayscale
        img = PILImage.new('L', (tmp_image.width, tmp_image.height))
        img_data = img.load()
    elif tmp_image.alpha_bits > 0:
        img = PILImage.new('RGBA', (tmp_image.width, tmp_image.height))
        img_data = img.load()
        for y in range(tmp_image.height):
            for x in range(tmp_image.width):
                img_data[y,x] = calc_color_rgba(tmp_image.pixels[y*tmp_image.height + x])
    else:
        img = PILImage.new('RGB', (tmp_image.width, tmp_image.height))
        img_data = img.load()
        for y in range(tmp_image.height):
            for x in range(tmp_image.width):
                img_data[y,x] = calc_color(tmp_image.pixels[y*tmp_image.height + x])
    return img

How do I identify a STATUS_INVALID_CRUNTIME_PARAMETER exception

Platform is Windows 7 SP1.
I recently spent some time debugging an issue that was caused by code passing an invalid parameter to one of the "safe" CRT functions. As a result my application was aborted right away with no warning -- not even a crash dialog.
At first, I tried to figure this out by attaching Windbg to my application. However, when the crash happened, by the time the code broke into Windbg pretty much every thread had been killed, save for the ONE thread that Windbg broke in on. There was no clue as to what was wrong. So, I attached Visual Studio as a debugger instead, and when my application terminated, I saw every thread exiting with error code 0xc0000417. That is what gave me the clue that there is an invalid parameter issue somewhere.
Next, the way I went about trying to debug this was to once again attach Windbg to my application, but this time place breakpoints (by trial and error) in various places like kernel32!TerminateThread, kernel32!UnhandledExceptionFilter and kernel32!SetUnhandledExceptionFilter.
Of the lot, placing a breakpoint at SetUnhandledExceptionFilter immediately showed the callstack of the offending thread when the crash occurred, and the CRT function that we were calling incorrectly.
Question: Is there anything intuitive that should have told me to place a breakpoint on SUEF right away? I would like to understand this a bit better and not do it by trial and error. The second question is with respect to the error code I determined via Visual Studio: without resorting to VS, how do I determine thread exit codes in Windbg?
I was going to just comment, but this became bigger, so here is an answer.
Setting windbg as the postmortem debugger with windbg -I will also route all unhandled exceptions to windbg.
windbg -I registers windbg as the postmortem debugger.
By default, Auto is set to 1 in the AeDebug registry key.
If you don't want to debug every program, you can edit this to 0 to get an additional do-you-want-to-debug option in the WER dialog.
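(A hedged sketch of flipping that value from an elevated prompt; it targets the same key shown by the query below:)
reg add "hklm\software\microsoft\windows nt\currentversion\aedebug" /v Auto /t REG_SZ /d 0 /f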
reg query "hklm\software\microsoft\windows nt\currentversion\aedebug"
HKEY_LOCAL_MACHINE\software\microsoft\windows nt\currentversion\aedebug
Debugger REG_SZ "xxxxxxxxxx\windbg.exe" -p %ld -e %ld -g
Auto REG_SZ 0
Assuming you registered a postmortem debugger, run this code:
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
    unsigned long input[] = {1,45,0xf001,0xffffffff};
    int i = 0;
    /* 0xffffffff needs 8 hex digits plus a NUL, which overflows buf[5]
       and trips the CRT's invalid-parameter handler */
    char buf[5] = {0};
    for(i=0;i<_countof(input);i++)
    {
        _ultoa_s(input[i],buf,sizeof(buf),16);
        printf("%s\n",buf);
    }
    return 1;
}
On the exception you will see a WER dialog like this, where you can choose to debug the program.
Windows also writes the exit code for the unhandled exception to the event log.
You can use PowerShell to retrieve the most recent event like this:
PS C:\> Get-EventLog -LogName Application -Source "Application Error" -newest 1| format-list
Index : 577102
EntryType : Error
InstanceId : 1000
Message : Faulting application name:
ultos.exe, version: 0.0.0.0, time stamp: 0x577680f1
Faulting module name: ultos.exe, version:
0.0.0.0, time stamp: 0x577680f1
Exception code: 0xc0000417
Fault offset: 0x000211c2
Faulting process id: 0x4a8
Faulting application start time: 0x01d1d3aaf61c8aaa
Faulting application path: E:\test\ulto\ultos.exe
Faulting module path: E:\test\ulto\ultos.exe
Report Id: 348d86fc-3f9e-11e6-ade2-005056c00008
Category : Application Crashing Events
CategoryNumber : 100
ReplacementStrings : {ultos.exe, 0.0.0.0, 577680f1, ultos.exe...}
Source : Application Error
TimeGenerated : 7/1/2016 8:42:21 PM
TimeWritten : 7/1/2016 8:42:21 PM
UserName :
And if you choose to debug, you can view the call stack:
0:000> kPL
# ChildEBP RetAddr
00 001ffdc8 77cf68d4 ntdll!KiFastSystemCallRet
01 001ffdcc 75e91fdb ntdll!NtTerminateProcess+0xc
02 001ffddc 012911d3 KERNELBASE!TerminateProcess+0x2c
03 001ffdec 01291174 ultos!_invoke_watson(
wchar_t * expression = 0x00000000 "",
wchar_t * function_name = 0x00000000 "",
wchar_t * file_name = 0x00000000 "",
unsigned int line_number = 0,
unsigned int reserved = 0)+0x31
04 001ffe10 01291181 ultos!_invalid_parameter(
wchar_t * expression = <Value unavailable error>,
wchar_t * function_name = <Value unavailable error>,
wchar_t * file_name = <Value unavailable error>,
unsigned int line_number = <Value unavailable error>,
unsigned int reserved = <Value unavailable error>)+0x7a
05 001ffe28 0128ad96 ultos!_invalid_parameter_noinfo(void)+0xc
06 001ffe3c 0128affa ultos!common_xtox<unsigned long,char>(
unsigned long original_value = 0xffffffff,
char * buffer = 0x001ffea4 "",
unsigned int buffer_count = 5,
unsigned int radix = 0x10,
bool is_negative = false)+0x58
07 001ffe5c 0128b496 ultos!common_xtox_s<unsigned long,char>(
unsigned long value = 0xffffffff,
char * buffer = 0x001ffea4 "",
unsigned int buffer_count = 5,
unsigned int radix = 0x10,
bool is_negative = false)+0x59
08 001ffe78 012712b2 ultos!_ultoa_s(
unsigned long value = 0xffffffff,
char * buffer = 0x001ffea4 "",
unsigned int buffer_count = 5,
int radix = 0n16)+0x18
09 001ffeac 0127151b ultos!main(void)+0x52
0a (Inline) -------- ultos!invoke_main+0x1d
0b 001ffef8 76403c45 ultos!__scrt_common_main_seh(void)+0xff
0c 001fff04 77d137f5 kernel32!BaseThreadInitThunk+0xe
0d 001fff44 77d137c8 ntdll!__RtlUserThreadStart+0x70
0e 001fff5c 00000000 ntdll!_RtlUserThreadStart+0x1b

systemtap userspace function tracing

I have a simple C++ program
main.cpp
#include <iostream>
using namespace std;

int addition (int a, int b)
{
    int r;
    r=a+b;
    return r;
}

int main ()
{
    int z;
    z = addition (5,3);
    cout << "The result is " << z;
}
I want to generate function tracing for this - print the function names with their inputs and return values.
My systemtap script: para-callgraph.stp
#! /usr/bin/env stap
function trace(entry_p, extra) {
    %( $# > 1 %? if (tid() in trace) %)
    printf("%s%s%s %s\n",
           thread_indent (entry_p),
           (entry_p>0?"->":"<-"),
           probefunc (),
           extra)
}

probe $1.call   { trace(1, $$parms) }
probe $1.return { trace(-1, $$return) }
My C++ executable is called a (compiled with g++ -g main.cpp).
The command I run:
stap para-callgraph.stp 'process("a").function("*")' -c "./a > /dev/null"
0 a(15119):->_GLOBAL__I__Z8additionii
27 a(15119): ->__static_initialization_and_destruction_0 __initialize_p=0x0 __priority=0x0
168 a(15119): <-__static_initialization_and_destruction_0
174 a(15119):<-_GLOBAL__I__Z8additionii
0 a(15119):->main
18 a(15119): ->addition a=0x0 b=0x400895
30 a(15119): <-addition return=0x8
106 a(15119):<-main return=0x0
Here ->addition a=0x0 b=0x400895 shows addresses, not the actual values (i.e. 5 and 3) that I want.
How do I modify my stap script?
This appears to be a systemtap bug. It should print the value of b, not its address. Please report it to the systemtap@sourceware.org mailing list (with compiler/etc. versions and other info, as outlined in man error::reporting).
As to changing the script, the $$parms part is where the local variables are being transformed into a pretty-printed string. It could be changed to something like...
trace(1, $$parms . (@defined($foobar) ? (" foobar=".$foobar$) : ""))
to append foobar=XYZ to the trace record, wherever a parameter foobar is available. To work around the systemtap bug in question, you could try
trace(1, $$parms . (@defined($b) ? (" *b=".user_int($b)) : ""))
to dereference the b variable as if it were an int *.

How to solve "Segmentation fault (core dumped)"

Here is my code:
#include<stdio.h>
#include<pcap.h>

void pcapdump(u_char* argument,const struct pcap_pkthdr* packet_header,const u_char* packet_content);

int main()
{
    int i=0, devid,ret;
    char errbuf[PCAP_ERRBUF_SIZE];
    pcap_t *handle;
    bpf_u_int32 mask;
    bpf_u_int32 net;
    int num_packets=500;
    pcap_dumper_t *p;
    pcap_if_t *alldevs;
    pcap_if_t *pdev;
    const struct pcap_pkthdr *packet_header;
    const u_char *packet_content;
    ret=pcap_findalldevs(&alldevs,errbuf);
    if(ret=-1)
    {
        printf("%s",errbuf);
    };
    for (pdev = alldevs;pdev;pdev=pdev->next)
        printf("#%d: %s %s %s\n",++i,pdev->name,pdev->description,pdev->description);
    printf("select a device: ");
    scanf("%d", &devid);
    pdev=alldevs;
    while (--devid)
        pdev=pdev->next;
    printf("Selected %s \n", pdev->name);
    if (pcap_lookupnet(pdev->name,&net,&mask,errbuf)==-1)
    {
        printf("Couldn't get netmask for device %s: %s\n", pdev->name, errbuf);
        net = 0;
        mask = 0;
    };
    handle=pcap_open_live(pdev->name,BUFSIZ,1,0,errbuf);
    printf("Number of packets: %d\n", num_packets);
    pcap_dump_open(handle,"/home/jiangzhongbai/capturefiles/10.pcapng");
    pcap_loop(handle,num_packets,pcap_dump,NULL);
    pcap_dump_close(p);
    pcap_freealldevs(alldevs);
    pcap_close(handle);
    printf("\nCapture complete.\n");
    return 0;
}
The result is
eth0 (null) (null)
wlan0 (null) (null)
nflog Linux netfilter log (NFLOG) interface Linux netfilter log (NFLOG) interface
nfqueue Linux netfilter queue (NFQUEUE) interface Linux netfilter queue (NFQUEUE) interface
any Pseudo-device that captures on all interfaces Pseudo-device that captures on all interfaces
lo (null) (null)
select a device: 2
Selected wlan0
Number of packets: 500
Segmentation fault (core dumped)
I think there is something wrong with the function pcap_dump_open, but I don't know how to solve the segmentation fault. Please help me.
If pcap_findalldevs() returns -1, don't just print an error message; quit, because alldevs isn't necessarily set to a valid value or to NULL.
Do not assume that pdev->description is non-null - only print it if it's non-null.
Assign the result of pcap_dump_open() to the variable p.
Pass p, rather than NULL, as the fourth argument to pcap_loop(), as in the sketch below.
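Putting those together, a minimal sketch of just the changed parts (assuming the rest of the program stays as posted; note that the original if(ret=-1) assigns -1 rather than comparing, so the error branch never worked anyway):
ret = pcap_findalldevs(&alldevs, errbuf);
if (ret == -1) {  /* ==, not = */
    fprintf(stderr, "%s\n", errbuf);
    return 1;  /* quit: alldevs is not guaranteed to be valid here */
}
...
for (pdev = alldevs; pdev; pdev = pdev->next)
    printf("#%d: %s %s\n", ++i, pdev->name,
           pdev->description ? pdev->description : "(no description)");
...
p = pcap_dump_open(handle, "/home/jiangzhongbai/capturefiles/10.pcapng");
if (p == NULL) {
    fprintf(stderr, "pcap_dump_open: %s\n", pcap_geterr(handle));
    return 1;
}
pcap_loop(handle, num_packets, pcap_dump, (u_char *)p);  /* pass p, not NULL */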

CUDA_ERROR_INVALID_IMAGE during cuModuleLoad

I've created a very simple kernel (can be found here) which I successfully compile using
"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v5.5\bin\nvcc.exe" --cl-version 2012 -ccbin "C:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\bin" -I"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v5.5\include" -cudart static -cubin temp.cu
and subsequently use the following code to load the kernel in
CUresult err = cuInit(0);
CUdevice device;
err = cuDeviceGet(&device, 0);
CUcontext ctx;
err = cuCtxCreate(&ctx, 0, device);
CUmodule module;
string path = string(dir) + "\\temp.cubin";
err = cuModuleLoad(&module, path.c_str());
cuCtxDetach(ctx);
Unfortunately, during cuModuleLoad I get a result of CUDA_ERROR_INVALID_IMAGE. Can someone tell me why this could be happening? The kernel's valid and compiles without issues.
The CUDA_ERROR_INVALID_IMAGE error should only be returned by cuModuleLoad when the module file is invalid. If it is missing or contains an architecture mismatch you should probably see a CUDA_ERROR_FILE_NOT_FOUND or CUDA_ERROR_INVALID_SOURCE error. You haven't given us enough details or code to say for certain what is happening, but in principle at least, the API code you have should work.
To show how this should work, consider the following working example on Linux with CUDA 5.5:
First your kernel:
#include <cmath>
using namespace std;

__device__ __inline__ float trim(unsigned char value)
{
    return fminf((unsigned char)255, fmaxf(value, (unsigned char)0));
}

__constant__ char z = 1;

__global__ void kernel(unsigned char* img, const float* a)
{
    int ix = blockIdx.x;
    int iy = threadIdx.x;
    int tid = iy*blockDim.x + ix;
    float x = (float)ix / blockDim.x;
    float y = (float)iy / gridDim.x;
    //placeholder
    img[tid*4+0] = trim((a[0]*z*z+a[1]*z+a[2]) * 255.0f);
    img[tid*4+1] = trim((a[3]*z*z+a[4]*z+a[5]) * 255.0f);
    img[tid*4+2] = trim((a[6]*z*z+a[7]*z+a[8]) * 255.0f);
    img[tid*4+3] = 255;
}
Then a simple program to load the cubin into a context at runtime:
#include <cuda.h>
#include <string>
#include <iostream>

#define Errchk(ans) { DrvAssert((ans), __FILE__, __LINE__); }
inline void DrvAssert( CUresult code, const char *file, int line)
{
    if (code != CUDA_SUCCESS) {
        std::cout << "Error: " << code << " " << file << "#" << line << std::endl;
        exit(code);
    } else {
        std::cout << "Success: " << file << "#" << line << std::endl;
    }
}

int main(void)
{
    Errchk( cuInit(0) );

    CUdevice device;
    Errchk( cuDeviceGet(&device, 0) );

    CUcontext ctx;
    Errchk( cuCtxCreate(&ctx, 0, device) );

    CUmodule module;
    std::string path = "qkernel.cubin";
    Errchk( cuModuleLoad(&module, path.c_str()) );

    cuCtxDetach(ctx);
    return 0;
}
Build the cubin for the architecture of the device present in the host (a GTX670 in this case):
$ nvcc -arch=sm_30 -Xptxas="-v" --cubin qkernel.cu
ptxas info : 11 bytes gmem, 1 bytes cmem[3]
ptxas info : Compiling entry function '_Z6kernelPhPKf' for 'sm_30'
ptxas info : Function properties for _Z6kernelPhPKf
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 10 registers, 336 bytes cmem[0]
and the host program:
$ nvcc -o qexe qmain.cc -lcuda
then run:
$ ./qexe
Success: qmain.cc#18
Success: qmain.cc#20
Success: qmain.cc#22
Success: qmain.cc#26
The module code loads. If I delete the cubin and run again, I see this:
$ rm qkernel.cubin
$ ./qexe
Success: qmain.cc#18
Success: qmain.cc#20
Success: qmain.cc#22
Error: 301 qmain.cc#26
If I compile for an incompatible architecture, I see this:
$ nvcc -arch=sm_10 -Xptxas="-v" --cubin qkernel.cu
ptxas info : 0 bytes gmem, 1 bytes cmem[0]
ptxas info : Compiling entry function '_Z6kernelPhPKf' for 'sm_10'
ptxas info : Used 5 registers, 32 bytes smem, 4 bytes cmem[1]
$ ./qexe
Success: qmain.cc#18
Success: qmain.cc#20
Success: qmain.cc#22
Error: 300 qmain.cc#26
If I compile to an object file, not a cubin, I see this:
$ nvcc -arch=sm_30 -Xptxas="-v" -c -o qkernel.cubin qkernel.cu
ptxas info : 11 bytes gmem, 1 bytes cmem[3]
ptxas info : Compiling entry function '_Z6kernelPhPKf' for 'sm_30'
ptxas info : Function properties for _Z6kernelPhPKf
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 10 registers, 336 bytes cmem[0]
$ ./qexe
Success: qmain.cc#18
Success: qmain.cc#20
Success: qmain.cc#22
Error: 200 qmain.cc#26
This is the only way I can get the code to emit a CUDA_ERROR_INVALID_IMAGE error. All I can suggest is to try my code and recipe and see if you can get it to work.
This also happens if you compile for a different machine type - for example, 32-bit vs. 64-bit.
If you have a 32-bit app, add --machine 32 to the nvcc parameters and it will be fine.
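For example, for a 32-bit host application the compile line from the answer above might become (same qkernel.cu, and the flag combination is an assumption on my part):
$ nvcc --machine 32 -arch=sm_30 --cubin qkernel.cu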