When using debug_traceCall, I get a low-level EVM trace of all opcodes and state changes during the execution, which is excessively detailed. With the default callTracer I get a much nicer call tree, but either way I cannot seem to extract the emitted events from the trace. I can see them in the trace (the LOG* opcodes), but there is no easy way to actually parse them into something "readable" (along with their values and originating address). There must be a way to get the logs - any ideas?
E.g. this is what Etherscan shows: https://etherscan.io/tx-decoder?tx=0x3e3ad35fda1fddd9e154b3860b50371a1acd2fdb4f27f897e234846522bde732 (see the Emitted Events section)
So I figured this out myself - I created a custom JavaScript tracer for geth that is passed to geth as the 3rd param to debug_traceCall (see the provided API reference link):
{
    // accumulates one record per LOG* opcode encountered
    data: [],
    fault: function (log) {
    },
    step: function (log) {
        // capture the digit from LOG0..LOG4; topicCount is a string, so even "0" is truthy
        var topicCount = (log.op.toString().match(/LOG(\d)/) || [])[1];
        if (topicCount) {
            // LOG* stack layout: [offset, size, topic0, ..., topicN]
            var res = {
                address: log.contract.getAddress(),
                data: log.memory.slice(parseInt(log.stack.peek(0)), parseInt(log.stack.peek(0)) + parseInt(log.stack.peek(1))),
            };
            for (var i = 0; i < topicCount; i++)
                res['topic' + i.toString()] = log.stack.peek(i + 2);
            this.data.push(res);
        }
    },
    result: function () {
        return this.data;
    }
}
This tracer is executed by geth for each operation in the trace. Essentially, it:
checks whether the current opcode is one of LOG0, LOG1, LOG2, LOG3 or LOG4
extracts the emitting address from the current contract
extracts topic0 and any subsequent topics
extracts the additional event data from memory (note: stack[0] is the data offset, stack[1] is the data size)
Passing the tracer to geth looks like this:
res = await ethersProvider.send('debug_traceCall', [{
from: tx.from,
to: tx.to,
// _hex is an internal ethers BigNumber property; ethers pads hex to an even
// length (e.g. 0x0f), while geth rejects quantities with a leading zero,
// hence the replace
gas: BigNumber.from(tx.gas)._hex.replace('0x0', '0x'),
gasPrice: BigNumber.from(tx.gasPrice)._hex.replace('0x0', '0x'),
value: BigNumber.from(tx.value)._hex.replace('0x0', '0x'),
data: tx.input
}, "latest", {
tracer: "{\n" +
" data: [],\n" +
" fault: function (log) {\n" +
" },\n" +
" step: function (log) {\n" +
" var topicCount = (log.op.toString().match(/LOG(\\d)/) || [])[1];\n" +
" if (topicCount) {\n" +
" var res = {\n" +
" address: log.contract.getAddress(),\n" +
" data: log.memory.slice(parseInt(log.stack.peek(0)), parseInt(log.stack.peek(0)) + parseInt(log.stack.peek(1))),\n" +
" };\n" +
" for (var i = 0; i < topicCount; i++)\n" +
" res['topic' + i.toString()] = log.stack.peek(i + 2);\n" +
" this.data.push(res);\n" +
" }\n" +
" },\n" +
" result: function () {\n" +
" return this.data;\n" +
" }\n" +
"}",
enableMemory: true,
enableReturnData: true,
disableStorage: true
}])
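As a follow-up (not part of the original answer): once the tracer output is converted to 0x-prefixed hex strings (topics left-padded to 32 bytes, data hex-encoded - e.g. by wrapping the values in toHex() inside the tracer), the entries can be decoded into readable events with ethers. A minimal sketch, assuming ethers v5 and using an ERC-20 Transfer ABI fragment as an example:
const { ethers } = require('ethers');

// ABI fragment(s) for the events you expect to see; extend as needed
const iface = new ethers.utils.Interface([
    'event Transfer(address indexed from, address indexed to, uint256 value)'
]);

for (const entry of res) {
    // gather however many topics the tracer recorded for this LOG* opcode
    const topics = [entry.topic0, entry.topic1, entry.topic2, entry.topic3]
        .filter(Boolean);
    try {
        const parsed = iface.parseLog({ topics: topics, data: entry.data });
        console.log(entry.address, parsed.name, parsed.args.toString());
    } catch (e) {
        // topic0 didn't match any event in the ABI - skip this entry
    }
}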
The script doesn't throw any errors, but rarely works completely - i.e. completes successfully with all of the expected data in the destination tab. The results generally break down as:
no results in the destination sheet - this happens ~50-75% of the time
all of the results in the destination sheet, except in cell A1 - ~25% of the time
100% completely works - ~15-25% of the time
Code snippet of the batchUpdate() call:
var data = [
{
range: (ss.getSheetName() + "!A1:AQ" + valueArray.length)
,values: valueArray
}
];
const resource = {
valueInputOption: "RAW"
,data: data
};
Logger.log("request = " + JSON.stringify(resource)
+ "\n" + "valueArray = " + valueArray.length
);
Logger.log(" Sheets.Spreadsheets.Values.batchUpdate(params, batchUpdateValuesRequestBody) ");
var response = Sheets.Spreadsheets.Values.batchUpdate(resource, spreadsheetId);
Logger.log("response = " + response.toString());
And the response:
response = {
"totalUpdatedRows": 37776,
"responses": [{
"updatedCells": 1482389,
"updatedRange": "BatchUpdateDestination!A1:AP37776",
"updatedColumns": 42,
"spreadsheetId": "adahsdassadasdsadaasdasdasdasdasdasdasdasdas",
"updatedRows": 37776
}
],
"spreadsheetId": "adahsdassadasdsadaasdasdasdasdasdasdasdasdas",
"totalUpdatedCells": 1482389,
"totalUpdatedSheets": 1,
"totalUpdatedColumns": 42
}
It's obviously a very large dataset, but I've pruned the destination spreadsheet to ensure there is ample room for the data, and from earlier testing I believe a specific size error would be returned if that were the blocker.
How can I troubleshoot, or better yet, prevent these incomplete executions? Is there any way to inspect the batch jobs that these requests initiate?
Answering my own question...
After toiling with this a little more, I couldn't figure out any way to troubleshoot or inspect these odd, seemingly successful batchUpdate() jobs. Thus, I resorted to splitting the batchUpdate() calls into batches of 15,000 rows. This seems to work consistently, though it may be a bit slower:
// This is the very large 2D array that is populated elsewhere
var valueArray = [];
var maxRows = valueArray.length;
var maxCols = valueArray[0].length;
var batchSize = 15000;
var lastBatchSize = 1; // despite the name, this is the 1-based row where the current batch starts
for (var currentRowCount = 1; currentRowCount <= maxRows; ++currentRowCount) {
if( currentRowCount % batchSize == 0
|| currentRowCount == maxRows
)
{
Logger.log("get new valuesToSet");
// slice's end index is exclusive; on the final batch use maxRows so the last row is not silently dropped
valuesToSet = valueArray.slice(lastBatchSize - 1, currentRowCount == maxRows ? maxRows : currentRowCount - 1);
var data = [
{
range: (ss.getSheetName() + "!A" + lastBatchSize + ":AQ" + (lastBatchSize + valuesToSet.length))
,values: valuesToSet
}
];
const resource = {
valueInputOption: "RAW"
,data: data
};
Logger.log("request = " + JSON.stringify(resource).slice(1, 100)
+ "\n" + "valuesToSet.length = " + valuesToSet.length
);
try {
var checkValues = null;
var continueToNextBatch = false;
for (var i = 1; i <= 3; ++i) {
Logger.log("try # = " + i
+ "\n" + " continueToNextBatch = " + continueToNextBatch
+ "\n" + " make the batchUpdate() request, then sleep for 5 seconds, then check if there are values in the target range."
+ "\n" + " if no values, then wait 5 seconds, check again."
+ "\n" + " if still not values after 3 tries, then resubmit the batchUpdate() requestion and recheck values"
+ "\n" + "range to check = " + "A" + lastBatchSize + ":AQ" + lastBatchSize
);
Logger.log(" Sheets.Spreadsheets.Values.batchUpdate(params, batchUpdateValuesRequestBody) ");
var response = Sheets.Spreadsheets.Values.batchUpdate(resource, spreadsheetId);
Logger.log("response = " + response.toString());
// loop and check for data in the newly written range
for (var checks = 1; checks <= 3; ++checks) {
Utilities.sleep(5000);
// getValues() on a one-row range always returns a single-row 2D array,
// so test whether that row actually contains data (its length is always 1)
checkValues = ss.getRange(("A" + lastBatchSize + ":AQ" + lastBatchSize)).getValues();
Logger.log("new cell populated - checks # = " + checks
+ "\n" + "range to check = " + "A" + lastBatchSize + ":AQ" + lastBatchSize
+ "\n" + "checkValues = " + checkValues
);
if(checkValues[0].join("") != "")
{
Logger.log("target range has data, so continue to next batch"
+ "\n" + "range to check = " + "A" + lastBatchSize + ":AQ" + lastBatchSize
+ "\n" + "checkValues = " + checkValues
);
continueToNextBatch = true;
break; // data confirmed - stop re-checking this batch
}
else
{
Logger.log("target range is still empty, so check again"
+ "\n" + "range to check = " + "A" + lastBatchSize + ":AQ" + lastBatchSize
);
}
}
if(continueToNextBatch)
{
break; // batch verified - leave the retry loop instead of resubmitting the same request
}
}
}
catch (e) {
console.error("range.setValues(valuesToSet) - yielded an error: " + e
+ "\n" + "valuesToSet = " + valuesToSet.length
+ "\n" + "maxRows = " + maxRows
+ "\n" + "maxCols = " + maxCols
+ "\n" + "currentRowCount = " + currentRowCount
+ "\n" + "current range row start (lastBatchSize) = " + lastBatchSize
+ "\n" + "current range row end (j - lastBatchSize) = " + (currentRowCount - lastBatchSize)
);
}
lastBatchSize = currentRowCount;
}
}
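For readers who want just the core technique without the retry scaffolding: the approach boils down to slicing the 2D array into fixed-size chunks and issuing one batchUpdate() per chunk. A minimal sketch using the same Advanced Sheets service and variables as above (the AQ bound matches the ranges used earlier):
var CHUNK = 15000; // rows per request
for (var start = 0; start < valueArray.length; start += CHUNK) {
    var chunk = valueArray.slice(start, start + CHUNK); // slice's end index is exclusive
    var resource = {
        valueInputOption: "RAW",
        data: [{
            // +1 converts the 0-based array offset to a 1-based sheet row
            range: ss.getSheetName() + "!A" + (start + 1) + ":AQ" + (start + chunk.length),
            values: chunk
        }]
    };
    Sheets.Spreadsheets.Values.batchUpdate(resource, spreadsheetId);
}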
I have a Node.js app that writes data to a MySQL backend. One field is an array that I stringify. I can see in the workbench that the data is correct in the database. However, when I retrieve it I get an error when I try to parse it:
"Unexpected token o in JSON at position 1"
If I log the result it shows up as [object Object].
From what I read online this means it is already a JS object and I do not need to parse it. However, I cannot find anything about how to get access to the data.
process: function (bot, msg, suffix) {
var ftcmds = suffix.split(" ", 1);
var ftName = ftcmds[0];
var ftArray;
var selectSQL = "SELECT FireTeam FROM fireteam WHERE Name = '" + ftName + "'";
var updateSQL = "UPDATE fireteam SET FireTeam = '" + ftArray + "'WHERE Name = '" + ftName + "'";
mysqlcon.query(selectSQL, function (err, result) {
console.log("Result |" + result);
console.log("Error |" + err);
if (err) {
console.log("Caught Error " + err + " " + msg.author);
}
else {
console.log("Recovered result " + result);
ftArray = result;
console.log("Attempting to update array");
ftArray.push(msg.author.id);
console.log("updated array " + ftArray);
var jsonArray = JSON.stringify(ftArray);
mysqlcon.query(updateSQL, function (err, result) {
console.log("Result |" + result);
console.log("Error |" + err);
if (err.toString().indexOf(dupErr) != -1) {
msg.author.send("Could not find that fireteam");
console.log("Error: Did not locate the requested name " + msg.author)
} else if (err) {
console.log("Caught Error " + err + " " + msg.author);
}
else {
msg.author.send("You have joined Fireteam " + name + ". I will setup a group chat on " + date + " if your team fills up.");
}
})
}
});
}
You should just be able to access it as an object. One thing to note: for a SELECT, the mysql driver returns an array of row objects, so the fields live on the individual rows; if each row has fields name and title you can access them as:
var name = result[0].name
var title = result[0].title
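Applied to the question's code, a minimal sketch of the whole round trip (variable and column names taken from the question; note the original builds updateSQL before ftArray has a value, and placeholder parameters let the driver handle the quoting):
mysqlcon.query(selectSQL, function (err, result) {
    if (err) {
        return console.log("Caught Error " + err);
    }
    // result is an array of rows; the stored JSON string is in the FireTeam column
    var ftArray = JSON.parse(result[0].FireTeam);
    ftArray.push(msg.author.id);
    // build the UPDATE only now, after ftArray has its value
    mysqlcon.query("UPDATE fireteam SET FireTeam = ? WHERE Name = ?",
        [JSON.stringify(ftArray), ftName],
        function (err2, result2) {
            // handle errors/success as in the question
        });
});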
I have the following AJAX code for processing results returned from the database:
$.ajax({
type: 'POST',
async: true,
url: "../../../avocado/private/functions/measures.php",
data: {name:selectedValue},
success: function(data, status){
var selectedData = JSON.parse(data);
console.log(selectedData);
document.getElementById("measures").innerHTML = "<div id=\"measures\">"
+ "<table class=\"table table-condensed\">"
+ "<tr><th>desc1</th><td>"+selectedData[0][6]+"</td></tr>"
+ "<tr><th>desc2</th><td>"+selectedData[0][7]+"</td></tr>"
+ "<tr><th>desc3</th><td>"+selectedData[0][8]+"</td></tr>"
+ "<tr><th>desc4</th><td>"+selectedData[0][9]+"</td></tr>"
+ "</table>"
+ "</div>";
},
error: function(xhr, status, err) {
alert(status + ": " + err);
}
});
The data returned is a 2D array, like this below:
Array[5]
0: Array[14]
1: Array[14]
2: Array[14]
3: Array[14]
4: Array[14]
What I want to do is loop over each inner array and display its information on the HTML page, but I have no idea how to go about it - this code only displays the values stored at index [0].
Can I please get some help?
==============================================================================
UPDATED
So I tried to use jQuery.append() like the following below...
jQuery.each( selectedData, function( i, val ) {
$("measures").append(
"<table class=\"table table-condensed\">"
+ "<tr><th>desc1</th><td>"+selectedData[i][6]+"</td></tr>"
+ "<tr><th>desc2</th><td>"+selectedData[i][7]+"</td></tr>"
+ "<tr><th>desc3</th><td>"+selectedData[i][8]+"</td></tr>"
+ "<tr><th>desc4</th><td>"+selectedData[i][9]+"</td></tr>"
+ "</table>"
);
});
/*
document.getElementById("measures").innerHTML = "<div id=\"measures\">"
+ "<table class=\"table table-condensed\">"
+ "<tr><th>desc1</th><td>"+selectedData[0][6]+"</td></tr>"
+ "<tr><th>desc2</th><td>"+selectedData[0][7]+"</td></tr>"
+ "<tr><th>desc3</th><td>"+selectedData[0][8]+"</td></tr>"
+ "<tr><th>desc4</th><td>"+selectedData[0][9]+"</td></tr>"
+ "</table>"
+ "</div>";
*/
Now it's not appending any values to the div #measures at all...
I have not tried the code, but I hope this will help you. Two things to note: your update selects $("measures") without the #, so the selector matches nothing, and the inner loop must iterate over value (the inner array), not index:
document.getElementById("measures").innerHTML = "";
$.each( selectedData, function( index, value ){
$.each( value, function( index2, value2 ){
$('#measures').append(value2);
});
});
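If the goal is the same table layout as in the update, the outer loop can instead build one table per inner array rather than appending bare cell values - a sketch reusing the markup from the question:
$("#measures").empty(); // note the # in the selector
jQuery.each(selectedData, function (i, row) {
    // row is one inner Array[14]; columns 6-9 hold the descriptions, as above
    $("#measures").append(
        "<table class=\"table table-condensed\">"
        + "<tr><th>desc1</th><td>" + row[6] + "</td></tr>"
        + "<tr><th>desc2</th><td>" + row[7] + "</td></tr>"
        + "<tr><th>desc3</th><td>" + row[8] + "</td></tr>"
        + "<tr><th>desc4</th><td>" + row[9] + "</td></tr>"
        + "</table>"
    );
});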
I am trying to write a Google Apps Script that will process all emails that have a specific label.
I am using the GmailApp.search function to retrieve all of the relevant emails, but when I try to use the functions documented in the GmailThread class, I get an error message saying it can't find the function.
Here is my code:
var incoming = "To_Bot"
function readBotsEmail()
{
var emails = GmailApp.search("label:" + incoming);
Logger.log("This is the 'emails' object:" + emails)
var emailsLoopIndex = 0
for (var email in emails)
{
emailsLoopIndex += 1;
try
{
Logger.log("iteration " + emailsLoopIndex + " " + email.getMessageCount());
}
catch(e)
{
Logger.log("iteration " + emailsLoopIndex + " " + e);
}
}
}
Here is the logger output.
[14-01-26 03:40:00:909 EST] This is the 'emails' object:GmailThread,GmailThread
[14-01-26 03:40:00:911 EST] iteration 1 TypeError: Cannot find function getMessageCount in object 0.
[14-01-26 03:40:00:914 EST] iteration 2 TypeError: Cannot find function getMessageCount in object 1.
Where am I going wrong?
You should avoid using ambiguous variable names; "email" and "emails" are a really bad choice when talking about threads on one side and an index integer on the other...
Your issue comes mainly from the confusion between these two variables: you used email instead of emailS, and you also seem to have forgotten that your value was an array of threads, thus needing to be indexed.
Here is your working code, just one letter's difference ;-) and a couple of brackets...
function readBotsEmail()
{
var emails = GmailApp.search("label:" + incoming);
Logger.log("This is the 'emails' object:" + emails)
var emailsLoopIndex = 0
for (var email in emails)
{
emailsLoopIndex += 1;
try
{
Logger.log("iteration " + emailsLoopIndex + " " + emails[email].getMessageCount());
}
catch(e)
{
Logger.log("iteration " + emailsLoopIndex + " " + e);
}
}
}
That said, you still have a lot of work to do on this script to make it return something interesting... for now it tells you the number of threads and how many messages they have... Anyway, that's a good start...
Good luck!
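As a further illustration (not part of the original answer), a plain index loop avoids the for...in pitfall entirely; getMessages() and getSubject() are standard GmailThread/GmailMessage methods:
function readBotsEmail() {
    var threads = GmailApp.search("label:" + incoming);
    for (var i = 0; i < threads.length; i++) {
        // threads[i] is a GmailThread, so its methods are available directly
        var messages = threads[i].getMessages();
        Logger.log("Thread " + (i + 1) + ": " + threads[i].getMessageCount()
            + " message(s), first subject: " + messages[0].getSubject());
    }
}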
I have a tight loop that fetches PNG files via XHR2. It works fine in Firefox and IE10. In Chrome, when my list of files hits about 5,500 I start getting XHR errors. I like the async interface, since I am interleaving these requests with local IndexedDB store requests.
Code below (I am using xhr2lib for fetches and PouchDB for the IndexedDB API).
I know that it is the XHR2 that is failing, since when this works in Chrome, all the XHR2 calls are processed before the database save calls. When it fails, I never get the save calls.
function getBlobs(fileList) {
console.log("starting to fetch blobs");
$.each(fileList, function (i, val) {
var path = baseURL + val.id + "." + imageType;
$xhr.ajax({
url: path,
dataType: "blob",
success: function (data) { saveBlob(data, val.size, val.id); }
});
});
}
function saveBlob(blob, length, id) {
if (blob.size != length) {
console.error("Blob Length found: " + blob.size + " expected: " + length);
}
putBlob(blob, id);
++fetchCnt;
if (fetchCnt == manifest.files.length) {
setTimeout(fetchComplete, 0);
}
}
function fetchComplete() {
var startTime = vm.get("startTime");
var elapsed = new Date() - startTime;
var fetchTime = ms2Time(elapsed);
vm.set("fetchTime", fetchTime);
}
function putBlob(blob, id) {
var cnt;
var type = blob.type;
DB.putAttachment(id + "/pic", blob, type, function (err, response) {
if (err) {
console.error("Could store blob: error: " + err.error + " reason: " + err.reason + " status: " + err.status);
} else {
console.log("saved: ", response.id + " rev: " + response.rev);
cnt = vm.get("blobCount");
vm.set("blobCount", ++cnt);
if (cnt == manifest.files.length) {
setTimeout(storeComplete, 0);
}
}
});
}
The Chromium folks acknowledge this is something they should fix: https://code.google.com/p/chromium/issues/detail?id=244910, but in the meantime I have implemented throttling using jQuery Deferred/resolve to keep the number of in-flight requests low.
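The answer doesn't include its code, but a throttling pool along these lines would fit the description - a sketch only, reusing names from the question, and assuming xhr2lib's $xhr.ajax accepts an error callback like jQuery's:
var MAX_IN_FLIGHT = 6; // cap on simultaneous XHR2 requests

function getBlobsThrottled(fileList) {
    var queue = fileList.slice(); // copy so we can consume it

    function next() {
        var val = queue.shift();
        if (!val) { return; } // queue drained
        var dfd = $.Deferred();
        $xhr.ajax({
            url: baseURL + val.id + "." + imageType,
            dataType: "blob",
            success: function (data) {
                saveBlob(data, val.size, val.id);
                dfd.resolve();
            },
            error: function () { dfd.resolve(); } // keep the pool moving on failure
        });
        dfd.done(next); // start the next request as this one settles
    }

    // prime the pool with MAX_IN_FLIGHT concurrent requests
    for (var i = 0; i < MAX_IN_FLIGHT; i++) { next(); }
}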