I'm struggling to reverse engineer a section of data associated with a CRC-16 checksum.
I know the polynomial used to calculate the original checksums is 0x8408, but nothing else: I don't know the initial value (if any), the final XOR value (if any), or whether the input or the result is reflected...
There is a well-known CRC-16 generator using this polynomial, CRC-16-CCITT, but despite everything I've tried I just can't figure out how the original checksum is being calculated.
Here is the data I've got, with the respective checksums. I also included the byte that sits between the data and the checksum; it is incremental and I'm not sure whether it is part of the calculation or not. (See the last two lines: the data is almost the same, the increment differs, and yet the checksums are identical.)
Each line is the 20 DATA bytes, then the incrementing byte (Inc), then the two checksum bytes (CRC):
00 00 00 00 00 00 01 ef f7 fe ef ff fd ef fb fa fd a2 aa 21 01 f4 e0
00 00 00 00 00 00 01 ef f7 fd ef ff fd fe fb fa fd a2 aa 21 02 f4 d1
00 00 00 00 00 00 01 f7 fe fd fd ff fd df ff fb fd a2 aa 21 03 f4 cd
00 00 00 00 00 00 01 f7 fe fe fd ff f7 ef ff fa fd a2 aa 21 04 f4 c2
00 00 00 00 00 00 01 ef f7 fe ef ff fe ef fb fa fd a2 aa 21 05 f4 db
00 00 00 00 00 00 01 ef f7 fe ef ff fd ef fb fa fd a2 aa 21 06 f4 db
The last byte of each line appears to be 0xF3 plus the negative sum of all the other bytes (including the 0xF4). This code checks that against all six examples:
#include <stdio.h>
#include <stdint.h>
static uint8_t data0[] =
{0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xef,0xf7,0xfe,0xef,
0xff,0xfd,0xef,0xfb,0xfa,0xfd,0xa2,0xaa,0x21,0x01,0xf4,0xe0};
static uint8_t data1[] =
{0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xef,0xf7,0xfd,0xef,
0xff,0xfd,0xfe,0xfb,0xfa,0xfd,0xa2,0xaa,0x21,0x02,0xf4,0xd1};
static uint8_t data2[] =
{0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xf7,0xfe,0xfd,0xfd,
0xff,0xfd,0xdf,0xff,0xfb,0xfd,0xa2,0xaa,0x21,0x03,0xf4,0xcd};
static uint8_t data3[] =
{0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xf7,0xfe,0xfe,0xfd,
0xff,0xf7,0xef,0xff,0xfa,0xfd,0xa2,0xaa,0x21,0x04,0xf4,0xc2};
static uint8_t data4[] =
{0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xef,0xf7,0xfe,0xef,0xff,
0xfe,0xef,0xfb,0xfa,0xfd,0xa2,0xaa,0x21,0x05,0xf4,0xdb};
static uint8_t data5[] =
{0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xef,0xf7,0xfe,0xef,0xff,
0xfd,0xef,0xfb,0xfa,0xfd,0xa2,0xaa,0x21,0x06,0xf4,0xdb};
int main()
{
    uint8_t *msgs[] = { data0, data1, data2, data3, data4, data5 };
    size_t lens[]   = { sizeof(data0), sizeof(data1), sizeof(data2),
                        sizeof(data3), sizeof(data4), sizeof(data5) };
    size_t n, i;
    uint8_t s;
    for (n = 0; n < 6; n++)
    {
        s = 0xf3;                        /* constant observed above */
        for (i = 0; i < lens[n] - 1; i++)
            s -= msgs[n][i];             /* subtract every byte except the final check byte */
        if (msgs[n][i] != s)             /* the last byte is the check byte */
            printf("mismatch in data%u\n", (unsigned)n);
    }
    return 0;
}
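If you want to generate the byte rather than just verify it, the same rule can be written as a small helper (just a sketch; the name check_byte is mine, not anything from the protocol):
#include <stdint.h>
#include <stddef.h>

/* Compute the expected final byte for msg[0..len-1], i.e. everything up to but
   not including the check byte itself. 0xf3 is the constant observed above;
   whether it is "really" an initial value or a final adjustment is a matter of
   interpretation. */
uint8_t check_byte(const uint8_t *msg, size_t len)
{
    uint8_t s = 0xf3;
    while (len--)
        s -= *msg++;
    return s;
}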
I'm attempting to decode raw h264 from a network stream using the Media Foundation Transform CLSID_MSH264DecoderMFT. Setting up the transform seems to work and it's accepting data. However, no matter how much data I provide, it always returns MF_E_TRANSFORM_NEED_MORE_INPUT.
The documentation says that the decoder will skip over all data until it finds valid Sequence and Picture Parameter Sets. I'm providing these and then a raw data frame, along with start codes:
00 00 00 01 67 42 c0 28 da 01 e0 19 fe 7c 05 a8 08 08 0a 00 00 03 00 02 00 00 03 00 61 1e 30 65
40 00 00 00 01 68 ce 3c 80 00 00 00 01 00 00 0e 6c 41 9a e0 eb 08 84 3c 14 ff fe 10 ff f8 64 14
f0 88 20 11 55 d5 7e 19 11 17 17 c5 c5 3f 05 00 a3 86 41 08 8a ae ab 58 8c 1f 11 88 cd f8 9f ff
f8 9d 78 21 f9 2a bf e2 3e 04 1f f8 20 08 92 7c 0e 33 52 67 e1 48 74 32 f8 5c 5f ca fd 77 12 df
3a 0f 93 11 89 2f 26 98 76 16 65 9b 78 87 77 ff ff fe 27 c6 fe b1 39 34 27 04 17 55 f0 61 fe 23
Above is only a partial sample, but it's representative of the data I provide to the transform.
Transform Setup:
ComPtr<IUnknown> pUnknown = nullptr;
HRESULT hResult = CoCreateInstance(CLSID_MSH264DecoderMFT, nullptr, CLSCTX_INPROC_SERVER, IID_IUnknown, &pUnknown);
if (S_OK != hResult) {
LogError("Failed to create H264 decoder");
return false;
}
hResult = pUnknown->QueryInterface(IID_PPV_ARGS(&mVideoDecoder));
if (hResult != S_OK) {
LogError("Failed to create H264 decoder");
return false;
}
ComPtr<IMFMediaType> pInputMediaType = nullptr;
hResult = MFCreateMediaType(&pInputMediaType);
if (S_OK != hResult) {
return false;
}
pInputMediaType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
pInputMediaType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
std::shared_ptr<VideoMp4Track> videoTrack = mDemuxer->getVideoTrack();
uint32_t width = videoTrack->getWidth();
uint32_t height = videoTrack->getHeight();
MFSetAttributeSize(pInputMediaType.Get(), MF_MT_FRAME_SIZE, width, height);
MFSetAttributeRatio(pInputMediaType.Get(), MF_MT_PIXEL_ASPECT_RATIO, width, height);
MFSetAttributeRatio(pInputMediaType.Get(), MF_MT_FRAME_RATE, videoTrack->getFrameRate(), 1);
pInputMediaType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_MixedInterlaceOrProgressive);
ComPtr<IMFAttributes> attributes;
mVideoDecoder->GetAttributes(&attributes);
hResult = attributes->SetUINT32(CODECAPI_AVLowLatencyMode, 1);
if (hResult != S_OK) {
LogError("Failed to set low latency mode. Video might be choppy.");
}
hResult = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, 1);
if (hResult != S_OK) {
LogError("Failed to set GPU acceleration. Video might be choppy.");
}
hResult = mVideoDecoder->SetInputType(0, pInputMediaType.Get(), 0);
if (hResult != S_OK) {
LogError("Failed to set input type for decoder");
return false;
}
ComPtr<IMFMediaType> pOutputType = nullptr;
hResult = MFCreateMediaType(&pOutputType);
if (S_OK != hResult) {
return false;
}
pOutputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
pOutputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
MFSetAttributeSize(pOutputType.Get(), MF_MT_FRAME_SIZE, width, height);
MFSetAttributeRatio(pOutputType.Get(), MF_MT_PIXEL_ASPECT_RATIO, width, height);
MFSetAttributeRatio(pOutputType.Get(), MF_MT_FRAME_RATE, videoTrack->getFrameRate(), 1);
hResult = mVideoDecoder->SetOutputType(0, pOutputType.Get(), 0);
if (hResult != S_OK) {
LogError("Failed to set input type for decoder");
return false;
}
// Notify the decoder.
hResult = mVideoDecoder->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL);
if (S_OK != hResult) {
LogError("Failed to send flush command to the decoder.");
return false;
}
hResult = mVideoDecoder->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
if (S_OK != hResult) {
LogError("Failed to send notify command to the decoder.");
return false;
}
hResult = mVideoDecoder->ProcessMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL);
if (S_OK != hResult) {
LogError("Failed to send notify command to the decoder.");
return false;
}
I have no idea why it isn't able to decode, would appreciate any help.
Thanks.
Edit:
DataPtr transformData = MakeDataPtr();
uint32_t startCode = 0x01000000;
std::shared_ptr<VideoMp4Track> video = mImpl->mDemuxer->getVideoTrack();
transformData->appendBytes(&startCode, 4);
DataPtr sps = video->getSequenceParameters();
transformData->appendData(*sps);
transformData->appendBytes(&startCode, 4);
DataPtr pps = video->getPictureParameters();
transformData->appendData(*pps);
transformData->appendBytes(&startCode, 4);
transformData->appendData(*sampleData);
transformData->appendBytes(&startCode, 4);
ComPtr<IMFSample> pSample = mImpl->createMFSample(transformData->getBytes(), transformData->getSize());
if (nullptr == pSample) {
LogError("Failed to create the buffer for decoder input");
return nullptr;
}
HRESULT hResult = mImpl->mVideoDecoder->ProcessInput(0, pSample.Get(), 0);
if (hResult != S_OK) {
if (hResult == MF_E_NOTACCEPTING) {
mImpl->mVideoDecoder->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL);
hResult = mImpl->mVideoDecoder->ProcessInput(0, pSample.Get(), 0);
}
else {
LogError("Error feeding to resampler...");
return nullptr;
}
}
DWORD dwStatus = 0;
// outputDataBuffer is empty, need to create it.
MFT_OUTPUT_DATA_BUFFER outputDataBuffer{};
ComPtr<IMFSample> pVideoSample = nullptr;
hResult = MFCreateSample(&pVideoSample);
if (S_OK != hResult) {
LogError("Failed to create a media sample for decoder output");
return nullptr;
}
ComPtr<IMFMediaBuffer> pOutputBuffer = nullptr;
hResult = MFCreateMemoryBuffer(sampleData->getSize(), &pOutputBuffer);
if (S_OK != hResult) {
LogError("Failed to create a memory buffer for decoder output");
return nullptr;
}
pVideoSample->AddBuffer(pOutputBuffer.Get());
outputDataBuffer.pSample = pVideoSample.Get();
do {
hResult = mImpl->mVideoDecoder->ProcessOutput(0, 1, &outputDataBuffer, &dwStatus);
if (hResult == MF_E_TRANSFORM_NEED_MORE_INPUT) {
// conversion end
break;
}
I've omitted the rest because it never gets further, it just stays in this loop populating the transform.
Edit 2:
(Not) Working sample on github
https://github.com/pma07pg/h264
The sample code was too large to dump here so I've put the main.cpp on github. Should be able to just put it into a VS project and run it off the bat.
There are a few bugs in your code.
1.) You didn't account for the start code size
yours:
const uint32_t parameterInputSize = sizeof(pictureParameters) + sizeof(sequenceParameters);
mine:
const uint32_t parameterInputSize = sizeof(startCode) + sizeof(pictureParameters) + sizeof(startCode) + sizeof(sequenceParameters);
2.) Your 'mdat's contain more than one AccessUnit. Each AccessUnit is prefixed with its length, which you have to replace with a start code.
Your 'mdat':
'mdat' = <size> data[0] | <size> data[1] | ... | <size> data[n] |
Replace the size with a start code and break the multiple Access Units into individual Access Units.
Required decoder input:
00 00 00 01 data[0]
00 00 00 01 data[1]
...
00 00 00 01 data[n]
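A rough sketch of that conversion in C++ (the function name is mine, and it assumes 4-byte big-endian length prefixes, which is what the 'avcC' configuration normally specifies; this is only an illustration, not the code from the linked example):
#include <cstddef>
#include <cstdint>
#include <vector>

// Convert one 'mdat' payload of length-prefixed access units into Annex-B form
// by replacing each 4-byte big-endian length field with a 00 00 00 01 start code.
std::vector<uint8_t> AvccToAnnexB(const uint8_t* data, size_t size)
{
    static const uint8_t startCode[4] = { 0x00, 0x00, 0x00, 0x01 };
    std::vector<uint8_t> out;
    size_t pos = 0;
    while (pos + 4 <= size) {
        uint32_t nalLen = (uint32_t(data[pos]) << 24) | (uint32_t(data[pos + 1]) << 16) |
                          (uint32_t(data[pos + 2]) << 8) | uint32_t(data[pos + 3]);
        pos += 4;
        if (nalLen > size - pos)    // truncated or malformed input
            break;
        out.insert(out.end(), startCode, startCode + 4);
        out.insert(out.end(), data + pos, data + pos + nalLen);
        pos += nalLen;
    }
    return out;
}
You still prepend the SPS and PPS (each behind its own start code) before the first converted access unit, as shown above.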
See details here: https://github.com/go4shoe/MedieFoundationExample
I have a Perl CGI script that is accessing Thai language, UTF-8 strings from a PostgreSQL DB and returning them to a web-based front end as JSON. The strings are fine when I get them from the DB and after I encode them as JSON (based on writing to a log file). However, when the client receives them they are corrupted, for example:
featurename "à¹\u0082รà¸\u0087à¹\u0080รียà¸\u0099วัà¸\u0094ภาษี"
Clearly some chars are being converted to Unicode escape sequences, but not all.
I could really use some suggestions as to how to solve this.
Simplified code snippet follows. I am using 'utf8' and 'utf8::all', as well as 'JSON'.
Thanks in advance for any help you can provide.
my $dataId = $cgi->param('dataid');
my $table = "uploadpoints";
my $sqlcommand = "select id,featurename from $table where dataid=$dataId;";
my $stmt = $gDbh->prepare($sqlcommand);
my $numrows = $stmt->execute;
# print JSON header
print <<EOM;
Content-type: application/json; charset="UTF-8"
EOM
my @retarray;
for (my $i = 0; ($i < $numrows); $i=$i+1)
{
my $hashref = $stmt->fetchrow_hashref("NAME_lc");
#my $featurename = $hashref->{'featurename'};
#logentry("Point $i feature name is: $featurename\n");
push @retarray, $hashref;
}
my $json = encode_json(\@retarray);
logentry("JSON\n $json");
print $json;
I have modified and simplified the example, now running locally rather than via browser invocation:
my $dataId = 5;
my $table = "uploadpoints";
my $sqlcommand = "select id,featurename from $table where dataid=$dataId and id=75;";
my $stmt = $gDbh->prepare($sqlcommand);
my $numrows = $stmt->execute;
my @retarray;
for (my $i = 0; ($i < $numrows); $i=$i+1)
{
my $hashref = $stmt->fetchrow_hashref("NAME_lc");
my $featurename = $hashref->{'featurename'};
print "featurename $featurename\n";
push @retarray, $hashref;
}
my $json = encode_json(\@retarray);
print $json;
Using hexdump as in Stefan's example, I've determined that the data as read from the database are already in UTF-8. It looks as though they are being re-encoded in the JSON encode method. But why?
The data in the JSON use exactly twice as many bytes as the original UTF-8.
perl testcase.pl | hexdump -C
00000000 66 65 61 74 75 72 65 6e 61 6d 65 20 e0 b9 82 e0 |featurename ....|
00000010 b8 a3 e0 b8 87 e0 b9 80 e0 b8 a3 e0 b8 b5 e0 b8 |................|
00000020 a2 e0 b8 99 e0 b9 81 e0 b8 88 e0 b9 88 e0 b8 a1 |................|
00000030 e0 b8 88 e0 b8 b1 e0 b8 99 e0 b8 97 e0 b8 a3 e0 |................|
00000040 b9 8c 0a 5b 7b 22 66 65 61 74 75 72 65 6e 61 6d |...[{"featurenam|
00000050 65 22 3a 22 c3 a0 c2 b9 c2 82 c3 a0 c2 b8 c2 a3 |e":"............|
00000060 c3 a0 c2 b8 c2 87 c3 a0 c2 b9 c2 80 c3 a0 c2 b8 |................|
00000070 c2 a3 c3 a0 c2 b8 c2 b5 c3 a0 c2 b8 c2 a2 c3 a0 |................|
00000080 c2 b8 c2 99 c3 a0 c2 b9 c2 81 c3 a0 c2 b8 c2 88 |................|
00000090 c3 a0 c2 b9 c2 88 c3 a0 c2 b8 c2 a1 c3 a0 c2 b8 |................|
000000a0 c2 88 c3 a0 c2 b8 c2 b1 c3 a0 c2 b8 c2 99 c3 a0 |................|
000000b0 c2 b8 c2 97 c3 a0 c2 b8 c2 a3 c3 a0 c2 b9 c2 8c |................|
000000c0 22 2c 22 69 64 22 3a 37 35 7d 5d |","id":75}]|
000000cb
Further suggestions? I tried using decode on the UTF string but got errors related to wide characters.
I did read the recommended answer from Tom Christiansen, as well as his Unicode tutorials, but I will admit much of it went over my head. Also, it seems my problem is considerably more constrained.
I did wonder whether retrieving the hash value and assigning it to a normal variable was doing some sort of auto-decoding or encoding. I do not really understand when Perl uses its internal character format as opposed to when it retains the external encoding.
UPDATE WITH SOLUTION
Turns out that since the string retrieved from the DB is already in UTF-8, I need to use 'to_json' rather than 'encode_json'. This fixed the problem. Learned a lot about Perl Unicode handling in the process though...
Also recommend: http://perldoc.perl.org/perluniintro.html
Very clear exposition.
NOTE: you should probably also read this answer, which makes my answer sub-par in comparison :-)
The problem is that you have to be sure in which format each string is, otherwise you'll get incorrect conversions. When handling UTF-8 a string can be in two formats:
raw UTF-8 encoded octet string, i.e. \x{100} represented as two octets 0xC4 0x80
internal Perl string representation, i.e. one Unicode character \x{100} (U+0100 Ā LATIN CAPITAL LETTER A WITH MACRON)
If I/O is involved you also need to know if the I/O layer does UTF-8 de/encoding or not. For terminal I/O you also have to consider if it understands UTF-8 or not. Both taken together can make it difficult to get meaningful debug printouts from your code.
If your Perl code needs to process UTF-8 strings after reading them from the source, you must make sure that they are in the internal Perl format. Otherwise you'll get surprising results when you call code that expects Perl strings and not raw octet strings.
I try to show this in my example code:
#!/usr/bin/perl
use warnings;
use strict;
use JSON;
open(my $utf8_stdout, '>& :encoding(UTF-8)', \*STDOUT)
or die "can't reopen STDOUT as utf-8 file handle: $!\n";
my $hex = "C480";
print "${hex}\n";
my $raw = pack('H*', $hex);
print STDOUT "${raw}\n";
print $utf8_stdout "${raw}\n";
my $decoded;
utf8::decode($decoded = $raw);
print STDOUT ord($decoded), "\n";
print STDOUT "${decoded}\n"; # Wide character in print at...
print $utf8_stdout "${decoded}\n";
my $json = JSON->new->encode([$decoded]);
print STDOUT "${json}\n"; # Wide character in print at...
print $utf8_stdout "${json}\n";
$json = JSON->new->utf8->encode([$decoded]);
print STDOUT "${json}\n";
print $utf8_stdout "${json}\n";
exit 0;
Copy & paste from my terminal (which supports UTF-8). Look closely at the differences between the lines:
$ perl dummy.pl
C480
Ā
Ä
256
Wide character in print at dummy.pl line 21.
Ā
Ā
Wide character in print at dummy.pl line 25.
["Ā"]
["Ā"]
["Ā"]
["Ä"]
But compare this to the following, where STDOUT is not a terminal, but piped to another program. The hex dump always shows "c4 80", i.e. UTF-8 encoded.
$ perl dummy.pl | hexdump -C
Wide character in print at dummy.pl line 21.
Wide character in print at dummy.pl line 22.
Wide character in print at dummy.pl line 25.
Wide character in print at dummy.pl line 26.
00000000 43 34 38 30 0a c4 80 0a c4 80 0a 5b 22 c4 80 22 |C480.......[".."|
00000010 5d 0a 5b 22 c4 80 22 5d 0a 43 34 38 30 0a c4 80 |].[".."].C480...|
00000020 0a 32 35 36 0a c4 80 0a 5b 22 c4 80 22 5d 0a 5b |.256....[".."].[|
00000030 22 c4 80 22 5d 0a |".."].|
00000036
I wonder how exactly a constructor or destructor is called, e.g. in C++. I'm especially interested in the OS point of view. I'm also interested in the case where we run an Android app written in Java and we want to get info about a user session. Can we use a constructor to record the time the session begins and a destructor to record the time it ends, and save the data in a database? Does the OS actually handle destructor calls, or does something else? Thanks in advance!
I'm not familiar with how Java handles constructors and destructors (Java involves a virtual machine layer), but I'll try to answer this from a C++ point of view.
The short answer to your question: the OS does not participate in constructor or destructor calls (unless they do something like heap allocation or a system call). The compiler inserts the calls to the constructor and destructor in the right places when it generates machine code.
For a simple program as follows:
class A{
int* i;
public:
A() { i = new int; }
~A() { delete i; }
};
int main() {
A a;
}
Let's examine the assembly code emitted by the compiler, using objdump:
00000000004006a6 <main>:
4006a6: 55 push %rbp
4006a7: 48 89 e5 mov %rsp,%rbp
4006aa: 48 83 ec 10 sub $0x10,%rsp
4006ae: 64 48 8b 04 25 28 00 mov %fs:0x28,%rax
4006b5: 00 00
4006b7: 48 89 45 f8 mov %rax,-0x8(%rbp)
4006bb: 31 c0 xor %eax,%eax
4006bd: 48 8d 45 f0 lea -0x10(%rbp),%rax
4006c1: 48 89 c7 mov %rax,%rdi
4006c4: e8 27 00 00 00 callq 4006f0 <_ZN1AC1Ev>
4006c9: 48 8d 45 f0 lea -0x10(%rbp),%rax
4006cd: 48 89 c7 mov %rax,%rdi
4006d0: e8 3f 00 00 00 callq 400714 <_ZN1AD1Ev>
4006d5: b8 00 00 00 00 mov $0x0,%eax
4006da: 48 8b 55 f8 mov -0x8(%rbp),%rdx
4006de: 64 48 33 14 25 28 00 xor %fs:0x28,%rdx
4006e5: 00 00
4006e7: 74 05 je 4006ee <main+0x48>
4006e9: e8 92 fe ff ff callq 400580 <__stack_chk_fail@plt>
4006ee: c9 leaveq
4006ef: c3 retq
Note that depending on the underlying architecture and compiler, your output might not be the same as mine, but the structure should generally be the same.
You can see the compiler automatically generates calls to the constructor, callq 4006f0 <_ZN1AC1Ev>, and the destructor, callq 400714 <_ZN1AD1Ev> (those are the mangled names of A::A() and A::~A()). The assembly code for the constructor is:
00000000004006f0 <_ZN1AC1Ev>:
4006f0: 55 push %rbp
4006f1: 48 89 e5 mov %rsp,%rbp
4006f4: 48 83 ec 10 sub $0x10,%rsp
4006f8: 48 89 7d f8 mov %rdi,-0x8(%rbp)
4006fc: bf 04 00 00 00 mov $0x4,%edi
400701: e8 8a fe ff ff callq 400590 <_Znwm@plt>
400706: 48 89 c2 mov %rax,%rdx
400709: 48 8b 45 f8 mov -0x8(%rbp),%rax
40070d: 48 89 10 mov %rdx,(%rax)
400710: 90 nop
400711: c9 leaveq
400712: c3 retq
400713: 90 nop
Assembly for the destructor (the callq 400560 <_ZdlPv@plt> is the call to operator delete, just as _Znwm@plt in the constructor is the call to operator new):
0000000000400714 <_ZN1AD1Ev>:
400714: 55 push %rbp
400715: 48 89 e5 mov %rsp,%rbp
400718: 48 83 ec 10 sub $0x10,%rsp
40071c: 48 89 7d f8 mov %rdi,-0x8(%rbp)
400720: 48 8b 45 f8 mov -0x8(%rbp),%rax
400724: 48 8b 00 mov (%rax),%rax
400727: 48 89 c7 mov %rax,%rdi
40072a: e8 31 fe ff ff callq 400560 <_ZdlPv@plt>
40072f: 90 nop
400730: c9 leaveq
400731: c3 retq
400732: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
400739: 00 00 00
40073c: 0f 1f 40 00 nopl 0x0(%rax)
I'm trying to connect to the Safecom TA-810 (a badge/registration system) to automate the process of calculating how long employees have worked each day. Currently this is done by:
Pulling the data into the official application
Printing a list of all 'registrations'
Manually entering the values from the printed lists into our HR application
This is a job that can take multiple hours, which we'd like to see automated. So far the official tech support has been disappointing and has refused to share any details.
Using Wireshark I have been capturing the UDP transmissions and have pretty much succeeded in understanding how the protocol is built up. I'm only having issues with what I suppose is a CRC field. I don't know how it is calculated (CRC type and parameters) or which fields it covers...
This is what a message header looks like:
D0 07 71 BC BE 3B 00 00
D0 07 - Message type
71 BC - This, I believe, is the CRC
BE 3B - Some kind of session identifier. Stays the same for every message after the initial message (initial message has '00 00' as value)
00 00 - Message number. '01 00', '02 00', '03 00'
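For reference, this is how I currently read the header, written as a packed struct (sketch only; the field names are my own labels):
#include <cstdint>

#pragma pack(push, 1)
struct SafecomHeader {
    uint16_t type;     // e.g. 0x07D0, stored on the wire as "D0 07" (little-endian)
    uint16_t crc;      // the field I can't reproduce yet
    uint16_t session;  // 00 00 in the initial request, then constant per session
    uint16_t msg_no;   // 00 00, 01 00, 02 00, ...
};
#pragma pack(pop)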
Some examples:
Header only examples
E8 03 17 FC 00 00 00 00 -> initial request (#0, no session nr)
D0 07 71 BC BE 3B 00 00 -> Initial response (#0, device sends a session nr)
4C 04 EF BF BE 3B 06 00 -> Message #6, still using the same session # as the initial response
Larger example, which has data
0B 00 07 E1 BE 3B 01 00 7E 45 78 74 65 6E 64 46 6D 74
I've also been trying to figure this out by reading the disassembled code from the original application. The screenshot below shows the code just before the socket.sendto call and seems to be related.
Any help will be extremely appreciated.
EDIT: I've made some progress debugging the application with OllyDbg. The CRC appears (reversed) in the EDX register at the selected line in the following screenshot.
Take a look at CRC RevEng. If you can correctly identify the data that the CRC is operating on and the location of the CRC, you should be able to determine the CRC parameters. If it is a CRC.
I've managed to create a PHP script that does the CRC calculation, worked out by debugging the application with OllyDbg.
The 'CRC' is calculated by adding up every 2 bytes (every short). If the result is larger than a short, the most significant short is added to the least significant short until the result fits in a short. Finally, the resulting short is inverted.
I'll add my php script for completeness:
<?php
function CompareHash($telegram)
{
$telegram = str_replace(" ", "", $telegram);
$telegram_crc = substr($telegram, 4, 4);
$telegram = str_replace($telegram_crc, "0000", $telegram);
echo "Telegram: ", $telegram, ', Crc: ', $telegram_crc, ' (', hexdec($telegram_crc), ')<br />';
$crc = 0;
$i = 0;
while ($i < strlen($telegram))
{
$short = substr($telegram, $i, 4);
if (strlen($short) < 4) $short = $short . '00';
$crc += hexdec($short);
$i += 4;
}
echo "Crc: ", $crc, ', inverse: ', ~$crc;
// Region "truncate CRC to Int16"
while($crc > hexdec('FFFF'))
{
$short = $crc & hexdec ('FFFF');
// Region "unsigned shift right by 16 bits"
$crc = $crc >> 16;
$crc = $crc & hexdec ('FFFF');
// End region
$crc = $short + $crc;
}
// End region
// Region "invert Int16"
$crc = ~$crc;
$crc = $crc & hexdec ('FFFF');
// End region
echo ', shifted ', $crc;
if (hexdec($telegram_crc) == $crc)
{
echo "<br />MATCH!!! <br />";
}
else
{
echo "<br />failed .... <br />";
}
}
$s1_full = "E8 03 17 FC 00 00 00 00";
$s2_full = "D0 07 71 BC BE 3B 00 00";
$s3_full = "D0 07 4E D4 E1 23 00 00";
$s4_full = "D0 07 35 32 BE 3B 07 00 7E 44 65 76 69 63 65 4E 61 6D 65 3D 54 41 38 31 30 00";
$s5_full = "0B 00 39 6C BE 3B 05 00 7E 52 46 43 61 72 64 4F 6E";
CompareHash($s1_full);
CompareHash($s2_full);
CompareHash($s3_full);
CompareHash($s4_full);
CompareHash($s5_full);
?>
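For completeness, the same algorithm as a small C-style function (a sketch; the name is mine, it assumes the 2-byte checksum field has already been zeroed out, as the PHP above does, and it sums each byte pair with the first byte as the high byte):
#include <stdint.h>
#include <stddef.h>

/* Sum the message as 16-bit words, fold any carry back into the low 16 bits,
   then invert the result. */
uint16_t safecom_checksum(const uint8_t *msg, size_t len)
{
    uint32_t sum = 0;
    size_t i;
    for (i = 0; i < len; i += 2) {
        uint16_t word = (uint16_t)(msg[i] << 8);  /* first byte of the pair is the high byte */
        if (i + 1 < len)                          /* a lone trailing byte is padded with 00 */
            word |= msg[i + 1];
        sum += word;
    }
    while (sum > 0xFFFF)                          /* fold the carry back into 16 bits */
        sum = (sum & 0xFFFF) + (sum >> 16);
    return (uint16_t)~sum;                        /* finally, invert */
}
For example, for "D0 07 71 BC BE 3B 00 00" with the CRC field zeroed, the words sum to 0x18E42, folding gives 0x8E43, and inverting gives 0x71BC, which matches the captured value.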
Thanks for the feedback!
I have a binary data stream which carries geolocation coordinates - latitude and longitude. I need to find out how they are encoded.
4adac812 = 74°26.2851' = 74.438085
2b6059f9 = 43°0.2763' = 43.004605
4adaee12 = 74°26.3003' = 74.438338
2a3c8df9 = 42°56.3177' = 42.938628
4ae86d11 = 74°40.1463' = 74.669105
2afd0efb = 42°59.6263' = 42.993772
The 1st value is the hex value. The 2nd and 3rd are the values I get in the output (I'm not sure which one is used in the conversion).
I've found that the first byte represents the integer part of the value (0x4a = 74), but I cannot work out how the decimal part is encoded.
I would really appreciate any help!
Thanks.
--
Upd: This stream comes from some "Chinese" GPS server software over a TCP connection. I have no sources or documentation for the client software. I suppose it was written in VC++ 6 and uses some standard implementations.
--
Upd: Here are the packets I get:
Hex data:
41 00 00 00 13 bd b2 2c
4a e8 6d 11 2a 3c 8d f9
f6 0c ee 13
Log data in client soft:
[Lng] 74°40.1463', direction:1
[Lat] 42°56.3177', direction:1
[Head] direction:1006, speed:3318, AVA:1
[Time] 2011-02-25 19:52:19
Result data in client (UI):
74.669105
42.938628
Head 100 // floor(1006/10)
Speed 61.1 // floor(3318/54.3)
41 00 00 00 b1 bc b2 2c
4a da ee 12 2b 60 59 f9
00 00 bc 11
[Lng] 74°26.3003', direction:1
[Lat] 43°0.2763', direction:1
[Head] direction:444, speed:0, AVA:1
[Time] 2011-02-25 19:50:49
74.438338
43.004605
00 00 00 00 21 bd b2 2c
4a da c8 12 aa fd 0e fb
0d 0b e1 1d
[Lng] 74°26.2851', direction:1
[Lat] 42°59.6263', direction:1
[Head] direction:3553, speed:2829, AVA:1
[Time] 2011-02-25 19:52:33
74.438085
42.993772
I don't know what the first 4 bytes mean.
I found that the lower 7 bits of the 5th byte represent the seconds value (maybe bytes 5-8 are the time?).
Byte 9 is the integer part of Lng.
Byte 13 is the integer part of Lat.
Bytes 17-18 reversed (read as a little-endian word) are the speed.
Bytes 19-20 reversed are AVA(?) & direction (4 + 12 bits). (BTW, does anybody know what AVA is?)
And one note: in the 3rd packet, only the lower 7 bits of the 13th byte are used. I guess the top bit doesn't mean anything (I removed it at the beginning, sorry if I'm wrong).
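To sum up my current guess at the 20-byte packet layout as a packed struct (sketch only; the field names are mine and nothing here is confirmed):
#include <cstdint>

#pragma pack(push, 1)
struct GpsPacket {
    uint8_t  unknown[4];  // bytes 1-4, meaning unclear
    uint8_t  time[4];     // bytes 5-8; the low 7 bits of byte 5 match the seconds value
    uint8_t  lng[4];      // bytes 9-12; byte 9 = integer degrees of longitude (0x4a = 74)
    uint8_t  lat[4];      // bytes 13-16; byte 13 = integer degrees of latitude (0x2a/0x2b)
    uint16_t speed;       // bytes 17-18, little-endian
    uint16_t ava_dir;     // bytes 19-20, little-endian: 4 bits AVA + 12 bits direction
};
#pragma pack(pop)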
I have reordered your data so that we first have 3 longitudes and then 3 latitudes:
74.438085, 74.438338, 74.669105, 43.004605, 42.938628, 42.993772
The best fit to the hexadecimal values that I can come up with is:
74.437368, 74.439881, 74.668392, 42.993224, 42.961388, 42.982391
The differences are: -0.000717, 0.001543, -0.000713, -0.011381, 0.022760, -0.011381
The program that generates these values from the complete hex values (4 bytes, not 3) is:
#include <stdio.h>
#include <conio.h> /* Windows-only, for getch() */

int main(int argc, char** argv) {
    int a[] = { 0x4adac812, 0x4adaee12, 0x4ae86d11, 0x2b6059f9, 0x2a3c8df9, 0x2afd0efb };
    int i = 0;
    while (i < 3) {
        /* linear fit for the three longitudes */
        double b = (double)a[i] / (2 << (3 * 8)) * 8.668993 - 250.0197;
        printf("%f\n", b);
        i++;
    }
    while (i < 6) {
        /* linear fit for the three latitudes */
        double b = (double)a[i] / (2 << (3 * 8)) * 0.05586007 + 41.78172;
        printf("%f\n", b);
        i++;
    }
    printf("press key");
    getch();
    return 0;
}
Brainstorming here.
If we look at the lower 6 bits of the second byte (data[1]&0x3f) we get the "minutes" value for most of the examples.
0xda & 0x3f = 0x1a = 26; // ok
0x60 & 0x3f = 0; // ok
0xe8 & 0x3f = 0x28 = 40; // ok
0x3c & 0x3f = 0x3c = 60; // should be 56
0xfd & 0x3f = 0x3d = 61; // should be 59
Perhaps this is the right direction?
I have tried your new data packets:
74+40.1463/60
74+26.3003/60
74+26.2851/60
42+56.3177/60
43+0.2763/60
42+59.6263/60
74.66910, 74.43834, 74.43809, 42.93863, 43.00460, 42.99377
My program gives:
74.668392, 74.439881, 74.437368, 42.961388, 42.993224, 39.407346
The differences are:
-0.000708, 0.001541, -0.000722, 0.022758, -0.011376, -3.586424
I re-used the 4 constants I derived from your first packet, as those are probably stored in your client somewhere. The slight differences might be the result of some randomization the client does to prevent you from getting the exact value or from reverse-engineering their protocol.