Determining the CID of larger (> 256KB) files - ipfs

I'm setting up a repo to be used in projects where CID-related data needs to be transacted on-chain, following a workflow of:
1.) Establishing the CID data;
2.) Transacting said data;
3.) Publishing/importing the data into IPFS after a successful transaction.
The main purpose of the repo is to reliably determine CIDs without importing data into IPFS (step 1). The workflow is aimed at avoiding the risk of front-running that arises when data becomes publicly available before the transaction under step 2 is completed (or even initiated). My thoughts on this perceived risk basically run down to this:
The purpose of the on-chain transaction is not only to uniquely identify content (say, NFT material), but also to establish/determine its provenance, or create some form of connection with its creator/(original) owner. Clearly the public availability of such content detracts from the core purpose of such a transaction, since it allows for - let's call it - NFT front-running: one could monitor the network (or the specific nodes generally involved in publishing NFT-like material to the IPFS network) for announcements of newly added data, and claim the mentioned connection for oneself.
Question: Is this "problem" as practical as I am perceiving it? I can only find very limited information on mitigating this risk, even though 1.) the practice of creating gas-less NFTs is increasingly popular and 2.) most of the data will likely enter IPFS through pinning services (with presumably short announcement intervals) rather than via self-managed nodes, where one could programmatically decouple adding (pre-transaction) from pinning/announcing (post-transaction).
Issue (the main reason for this post): I'm having difficulties establishing the CID for content exceeding the block size.
Creating a DAG Node and adding children to it:
// parts of the test file cid-from-scratch.js
describe("Create DAG-PB root from scratch", function () {
    let dagNode;
    it("Creates root DAG NODE", async function () {
        this.timeout(6000);
        dagNode = await sliceAddLink(buffer, new DAGNode());
        // Compare the length of the Links property of the created root node
        // with that retrieved from the local node, using the same content
        assert.equal(dagNode.Links.length, localDag.value.Links.length,
            "Expected same amount of children in created, as in retrieved DAG");
        for (let i = 0; i < dagNode.Links.length; i++) {
            console.log("Children Strings: ", dagNode.Links[i].Hash.toString());
            // Compare the string forms of the children's CIDs
            assert.equal(dagNode.Links[i].Hash.toString(), localDag.value.Links[i].Hash.toString(),
                "Children's CID should be same");
        }
        console.log(dagNode);
    });
});
/**
 * @param {Buffer} buffer2Slice The full content Buffer
 * @param {Object} dagNode new DAGNode()
 */
async function sliceAddLink(buffer2Slice, dagNode) {
    while (buffer2Slice.length > 0) {
        // Chunk the content into the default block size of 262144 bytes (256 KiB)
        let slice = buffer2Slice.slice(0, 262144);
        buffer2Slice = buffer2Slice.slice(262144);
        // Create the leaf CID; only the content and the codec (85) are passed,
        // the remaining options fall back to their defaults
        let sliceAddResult = await createCid(slice, undefined, undefined, 85);
        let link = { Tsize: slice.length, Hash: sliceAddResult };
        dagNode.addLink(link);
    }
    return dagNode;
}
/**
 * @param {Buffer} content
 * @param {string} [hashAlg] optional
 * @param {number} [cidVersion] optional
 * @param {number} [cidCode] optional - should be set to 85 (0x55, the 'raw' multicodec) when creating a DAG-LINK
 */
async function createCid(content, hashAlg, cidVersion, cidCode) {
    hashAlg = hashAlg || cidOptions.hashAlg;
    cidVersion = cidVersion || cidOptions.cidVersion;
    cidCode = cidCode || cidOptions.code;
    let fileHash = await multiHashing(content, hashAlg);
    return new CID(cidVersion, cidCode, fileHash);
}
The Links property of the created DAG node matches that of the DAG retrieved from the local IPFS node and that from Infura (based on the same content, of course). The problem is that, unlike the retrieved DAG nodes, the Data field of the created DAGNode is empty (and therefore yields a different CID):
DAGNode retrieved: Data property contains data
DAGNode created: Data property is empty
I'm adding to IPFS like so:
/**
 * @note ipfs.add with the preset options
 * @param {*} content Buffer (of file) to be published
 * @param {*} ipfs The ipfs instance involved
 */
async function assetToIPFS(content, ipfs) {
    let result = await ipfs.add(content, cidOptions);
    return result;
}

// Using the following ADD options
const cidOptions = {
    cidVersion: 1, // ipfs.add default = 0
    hashAlg: 'sha2-256',
    code: 112 // 0x70, the dag-pb multicodec
};
it("Adding publicly yields the same CID result", async function() {
// longer then standard time out because of interaction with public IPFS gateway
this.timeout(6000);
_ipfsInfura = await ipfsInfura;
let addResult = await assetToIPFS(buffer, _ipfsInfura)
assert.equal(localAddResult.cid.toString(), addResult.cid.toString(), "Different CIDS! (expected same)")
assert.equal(localAddResult.size, addResult.size, "Expected same size!")
})
and subsequently getting the DAG (from both the local node and the Infura node) like so, to compare them with the created DAG:
// Differences in DAG-PB object representation between Infura and the local node!
it("dag_get local and dag_get infura yield the same Data and Links array", async function () {
    this.timeout(6000);
    let cid = localAddResult.cid;
    localDag = await dagGet(cid, _ipfsLocal);
    let infuraDag = await dagGet(cid, _ipfsInfura);
    console.log("Local DAG: ", localDag.value);
    console.log("Infura DAG: ", infuraDag.value);
    // Differences in DAG-PB object representation between Infura and the local node:
    // Data is a Buffer in localDag and a Uint8Array in infuraDag
    assert.equalBytes(await dagData(localDag.value), await dagData(infuraDag.value), "Expected equal Data");
    assert.equal(infuraDag.value.Links.length, localDag.value.Links.length, "Expected same amount of children");
    assert(localDag.value.Links.length > 0, "Should have children (DAG-LINK objects)");
    for (let i = 0; i < localDag.value.Links.length; i++) {
        assert.equal(infuraDag.value.Links[i].Hash.toString(), localDag.value.Links[i].Hash.toString());
    }
});
The IPFS docs on working with blocks state that "A given file's 'hash' is actually the hash of the root (uppermost) node in the DAG."
One could suspect that the DAG node's Data field plays a role in this. On the other hand, judging by the length of Data, the js-ipfs examples on dag.put ('Some data'), and the IPLD DAG-PB specification ("Data may be omitted or a byte array with a length of zero or more"), this property seems rather arbitrary. (The Data array/buffer from both the Infura and local IPFS nodes have the same content.)
How can I create a root DAG node (using a content buffer and CID options), with not only the same Links property, but also the same Data property as the DAG root I'm getting after adding the same content buffer to an ipfs instance?
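For what it's worth, when ipfs.add chunks a file into a UnixFS DAG, the root's Data field is not arbitrary: it holds a marshaled UnixFS protobuf message recording the node type ('file') and the size of each child block. Below is a minimal sketch of reproducing it with the ipfs-unixfs package; the exact API (e.g. whether addBlockSize takes a BigInt or a plain number, and the constructor shape) varies between versions, so treat the details as assumptions to verify:

// Sketch: building the root node's Data field with ipfs-unixfs (assumed API, verify against your version)
const { UnixFS } = require('ipfs-unixfs')

function buildRootData(sliceLengths) {
    const file = new UnixFS({ type: 'file' })
    // Record the raw (pre-encoding) size of every 262144-byte chunk,
    // mirroring what the UnixFS importer stores while chunking
    for (const len of sliceLengths) {
        file.addBlockSize(BigInt(len)) // older versions accept plain numbers
    }
    return file.marshal() // bytes to assign to the root DAGNode's Data
}

Serializing a DAGNode with this Data and the same Links should then yield the same root CID as ipfs.add, assuming matching chunker and CID options.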

Related

How exactly does the ipfs cat method find and display the contents of files using a CID by making use of the DHT?

I have done a lot of research on the internet to learn how exactly the ipfs cat and get methods find and download files from other peers using a CID. I want to fully understand how this process works: "The cat method first searches your own node for the file requested, and if it can't find it there, it will attempt to find it on the broader IPFS network" (https://proto.school/regular-files-api/04).
This is the ipfs source code for cat:
async function * cat (ipfsPath, options = {}) {
    ipfsPath = normalizeCidPath(ipfsPath)

    if (options.preload !== false) {
        const pathComponents = ipfsPath.split('/')
        preload(CID.parse(pathComponents[0]))
    }

    const file = await exporter(ipfsPath, repo.blocks, options)

    // File may not have unixfs prop if small & imported with rawLeaves true
    if (file.type === 'directory') {
        throw new Error('this dag node is a directory')
    }

    if (!file.content) {
        throw new Error('this dag node has no content')
    }

    yield * file.content(options)
}
I deduce that the two important arguments that allow for peer routing and file fetching are repo.blocks and preload. repo.blocks is created during ipfs.create() and then passed as a parameter to ipfs.createCat(), the method that actually creates the cat method. preload is also created by ipfs.create() and passed as an argument to ipfs.createCat() so that it can be used in ipfs.cat(). What confuses me the most is which of preload or repo.blocks is actually responsible for CID querying. I analyzed the underlying methods for this part of cat:
const pathComponents = ipfsPath.split('/')
preload(CID.parse(pathComponents[0]))
and learned that this is the part of ipfs.cat that makes HTTP connections to other peers. However, this part:
const file = await exporter(ipfsPath, repo.blocks, options)
includes sub-methods like
const block = await blockstore.get(cid, options);
const node = dagPb.decode(block);
which also seem to be related to CID querying through the use of distributed hash tables. blockstore.get did not make use of any methods that seemed to connect to other peers or search for peers that have a CID, but I am still very confused about whether these methods have any relation to CID querying. I would highly appreciate any help on how the cat method works under the hood from someone who is an expert in ipfs, or at least resources I can use to learn the material myself.
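For a mental model only, here is a simplified sketch (an assumption about the architecture, not actual js-ipfs source) of how a blockstore handed to exporter can still reach the network: local misses fall through to bitswap, which handles provider discovery (e.g. via the DHT) and block exchange, while preload remains a plain HTTP hint to preload nodes:

// Simplified model (not actual js-ipfs code): a blockstore wrapper whose
// misses fall through to the network via bitswap
function networkedBlockstore (localStore, bitswap) {
    return {
        async get (cid, options = {}) {
            if (await localStore.has(cid)) {
                return localStore.get(cid) // local hit: no peers contacted
            }
            // hypothetical call: bitswap finds providers for the CID and fetches the block
            const block = await bitswap.want(cid, options)
            await localStore.put(cid, block) // cache locally for later reads
            return block
        }
    }
}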

FIWARE: IotAgent-json over MQTT

I'm working on connecting sensors to a FIWARE system.
The sensor can report the monitoring data with MQTT in a JSON payload.
With a public MQTT broker (HiveMQ), I can get the monitoring data payload as shown below:
{
    "src": "shellyplus1pm-7c87ce64d540",
    "dst": "shellyplus1pm-7c87ce64d540/events",
    "method": "NotifyStatus",
    "params": {
        "ts": 1659606613.35,
        "switch:0": {
            "id": 0,
            "apower": 38.04
        }
    }
}
My questions are:
(1) The sensor can only publish data on the topic DeviceId/events/rpc, but IotAgent-json expects to receive data on the topic /json/{{api-key}}/{{device-id}}/attrs (according to https://github.com/yanpengwuIoT/tutorials.IoT-over-MQTT). How can I let IotAgent-json subscribe to the topic DeviceId/events/rpc, which is defined by the sensor firmware and can't be changed?
(2) The sensor publishes the monitoring payload as a nested, multi-level JSON object as shown above, but IotAgent-json only supports a single-level JSON object (like '{"h": 70, "t": 15}', as described in https://github.com/telefonicaid/iotagent-json/blob/master/docs/usermanual.md). How can I parse the multi-level JSON object in IotAgent-json?
Any comment, sample or documentation on this is very much appreciated! Thank you very much.
You can create a small relay middleware to read from one topic and write to another:
const mqtt = require('mqtt');

const MQTT_BROKER_URL = process.env.MQTT_BROKER_URL || 'mqtt://mosquitto';
const MQTT_TOPIC_PROTOCOL = process.env.MQTT_TOPIC_PROTOCOL || 'json';

global.MQTT_CLIENT = mqtt.connect(MQTT_BROKER_URL);

MQTT_CLIENT.on('connect', () => {
    // The sensor publishes on <deviceId>/events/rpc
    MQTT_CLIENT.subscribe('+/events/rpc');
});
MQTT_CLIENT.on('message', measureReceived);

function measureReceived(topic, message) {
    const parts = topic.toString().split('/');
    // Extract the deviceId from <deviceId>/events/rpc
    const deviceId = parts[0];
    const apiKey = 'XXX';
    // Muck around with the payload: flatten the nested measure into the
    // single-level object the IoT Agent expects, and use the right topic
    const payload = JSON.parse(message.toString());
    const newPayload = JSON.stringify({ apower: payload.params['switch:0'].apower });
    const newTopic = 'attrs';
    process.nextTick(() => relay(apiKey, deviceId, newPayload, newTopic));
}

function relay(apiKey, deviceId, state, topic) {
    const mqttTopic = '/' + MQTT_TOPIC_PROTOCOL + '/' + apiKey + '/' + deviceId + '/' + topic;
    MQTT_CLIENT.publish(mqttTopic, state);
}
You could hard-code the logic to unpack the incoming complex measure and create a simpler payload, or you could investigate the use of the IoT Agent's expression library and provision the device so that it cherry-picks the correct parts of the payload. The latter is more sustainable in the long run, but requires knowledge of the JEXL expression language.
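To illustrate the second option, a device-provisioning sketch along these lines could map the raw params object and derive a flat attribute from it with a JEXL expression. The attribute names and the exact expression support are assumptions here (they depend on the IoT Agent version and its configured expression language), so treat this as a starting point rather than a verified config:

{
  "devices": [
    {
      "device_id": "shellyplus1pm-7c87ce64d540",
      "protocol": "IoTA-JSON",
      "transport": "MQTT",
      "attributes": [
        { "object_id": "params", "name": "params", "type": "Structure" },
        { "name": "apower", "type": "Number", "expression": "params['switch:0'].apower" }
      ]
    }
  ]
}

Note that the relay above would still be needed to bridge the topic mismatch from question (1).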

MQL4: Read single value from CSV

I'm trying to fetch one value from the data source website Quandl to be used within a MetaTrader4 script. The data source site provides a method to export data via API in formats including .csv, .json or .xml. I have chosen the .csv format, for which the data source website provides an API call in the following format:
https://www.quandl.com/api/v3/datasets/ADB/LAB_UNEMP_JPN.csv?rows=1&api_key=my_api_key
By using the rows=1 parameter in the above API call, I can choose to export just one value (which is the latest value).
Q1. Can I fetch the value straight from Quandl or do I have to save the dataset as a .csv file?
Because Quandl provides the API call (as seen above), would I be correct in assuming I can just fetch the value from their website and won't have to save the dataset to my computer as a .csv file, from which I would then have to fetch the latest value? I would much prefer to fetch the value straight from Quandl without saving any files.
Q2. How can I fetch the value to be used within my MT4 script?
I have unsuccessfully tried a method using FileOpen() to access the data on the site, and have then tried to print it so that I can compare the value to others. Is FileOpen() only for files saved to my computer? I'd like to be able to print the value within my script once retrieved so that I can use it. Here is what I have so far:
int start() {
    while (!IsStopped()) {
        Sleep(2000);
        int    handle;
        double value; // FileReadNumber() returns a double
        handle = FileOpen("https://www.quandl.com/api/v3/datasets/ADB/LAB_UNEMP_JPN.csv?rows=1&api_key=my_api_key", FILE_CSV, ';');
        if (handle > 0)
        {
            value = FileReadNumber(handle);
            Print(value);
            FileClose(handle);
        }
    }
    return(0);
}
If anyone could aid me in my pursuit to fetch this value and print it within my script, it would be a huge help.
A1: No, you need not use a proxy-file for this API
If one tries the API call, using a published Quandl syntax of: <pragma>://<URL.ip>/<relative.URL>[?<par.i>=<val.i>[&<par.j>=<val.j>[&...]]]
the server side will push you the content of:
Date,Value
2013-12-31,4.0
So your code may use the Quandl API like this:
void OnStart()
{
    string cookie = NULL,
           headers;
    char   post[],
           result[];
    int    res;

    /* TODO:
     * Must allow MT4 to access the server URL:
     * add the URL "https://www.quandl.com/api/v3/datasets/ADB/LAB_UNEMP_JPN.csv"
     * to the list of allowed URLs
     * ( MT4 -> Tools -> Options -> [Tab]: "Expert Advisors" ): */
    string aDataSOURCE_URL = "https://www.quandl.com/api/v3/datasets/ADB/LAB_UNEMP_JPN.csv";
    string aDataSOURCE_API = "rows=1&api_key=<My_API_Key>";

    //-- Create the body of the POST request for API specifications and API-authorization
    ArrayResize( post,
                 StringToCharArray( aDataSOURCE_API, // [in]  string to copy
                                    post,            // [out] target uchar array
                                    0,               // [in]  position from which copying starts
                                    WHOLE_ARRAY,     // [in]  number of elements to copy (-1 = up to and including the terminating '\0')
                                    CP_UTF8          // [in]  code page
                                    )
                 - 1                                  // drop the copied terminating '\0'
                 );
    //-- Reset the last error code
    ResetLastError();

    //-- Load the page from Quandl
    int timeout = 5000; //-- a timeout below 1000 ms is not enough for slow Internet connections
    res = WebRequest( "POST",            // [in]  HTTP method
                      aDataSOURCE_URL,   // [in]  URL
                      cookie,            // [in]  cookie value
                      NULL,              // [in]  value of the Referer header
                      timeout,           // [in]  timeout in milliseconds
                      post,              // [in]  data array of the HTTP message body
                      ArraySize( post ), // [in]  size of the post[] array
                      result,            // [out] array containing server response data
                      headers            // [out] server response headers
                      );
    //-- Check errors
    if ( res == -1 )
    {   Print( "WebRequest Error. Error code = ", GetLastError() );
        //-- Perhaps the URL is not listed; display a message about the necessity to add the address
        MessageBox( "Add the address '" + aDataSOURCE_URL + "' in the list of allowed URLs on tab 'Expert Advisors'", "Error", MB_ICONINFORMATION );
    }
    else //-- Load was successful
    {
        PrintFormat( "The data has been successfully loaded, size = %d bytes.", ArraySize( result ) );
        //-- parse the content ---------------------------------------
        /*
            "Date,Value
             2013-12-31,4.0"
        */
        //-- consume the content -------------------------------------
        //...
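        //-- a minimal parsing sketch, assuming the body is exactly the
        //-- two CSV lines shown above (added illustration, verify locally):
        string csv = CharArrayToString( result, 0, WHOLE_ARRAY, CP_UTF8 );
        string rows[];
        if ( StringSplit( csv, '\n', rows ) >= 2 )
        {
            string fields[];
            if ( StringSplit( rows[1], ',', fields ) == 2 )
                Print( "Latest value: ", StringToDouble( fields[1] ) );
        }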
    }
}
There are 4 principal items to take care of:
0: an MT4 permission to use a given URL
1: an API URL setup - <pragma>://<URL.ip>/<relative.URL>
2: an API const char &data[] assembly - [?<par.i>=<val.i>[&<par.j>=<val.j>[&...]]]
3: an API int data_size length calculation
Addendum: This is more a list of reasons why to avoid the New-MQL4.56789 WebRequest() function variants:
Whereas the MQL4 documentation promises a simple use of the WebRequest() function variants (cit.: "1. Sending simple requests of type "key=value" using the header Content-Type: application/x-www-form-urlencoded."), the reality is far from the promised simple use case:
0: DONE: an MT4 administrative step ( weakness: one cannot force MT4 to communicate { http | https } protocol(s) over ports other than their defaults ~ { :80 | :443 } )
1: the URL consists of two parts ( three, if using a :port specifier, which does not work in MT4; ref. 0: right above ). The <URL.ip_address> is the first one and can be expressed in a canonical IPv4 form ( 10.38.221.136 ) or in a DNS-translatable form ( MT4_APAC_PRIMARY.broker.com ). The second part, the <relative.URL>, tells the HttpServer where to locate a file ( it is an HttpServer-relative file location ). The published WebRequest() permits the use of both parts joined together, ref. aDataSOURCE_URL.
3: the WebServer API, if constructed so, may permit additional parameters to be specified and presented to the WebServer. The presentation depends on whether the { HTTP GET | HTTP POST } protocol option is selected on the caller side.
4: each call to the MT4 WebRequest() also requires the caller to specify the length of the data content parameter ( ref. the use of ArraySize( post ) as int data_size ).

Store and update JSON Data on a Server

My web application should be able to store and update (also load) JSON data on a server.
However, the data may contain some big arrays where, each time they are saved, only a new entry was appended.
My solution:
send updates to the server with a key path into the JSON data.
Currently I'm sending the data with an XMLHttpRequest via jQuery, like this:
/**
 * Asynchronously writes a file on the server (via a PHP script).
 * @param {String} file complete filename (path/to/file.ext)
 * @param content content that should be written; may be a js object.
 * @param {Array} updatePath (optional), JSON only. Not the entire file is written,
 *        but the given path within the object is updated. By default the path is
 *        supposed to contain an array and the content is appended to it.
 * @param {String} key (optional) in combination with updatePath. If a key is provided,
 *        the content is written to a field of that name at the location the updatePath
 *        points to in the old content.
 *
 * @returns {Promise}
 */
io.write = function (file, content, updatePath, key) {
    if (utils.isObject(content)) content = JSON.stringify(content, null, "\t");
    file = io.parsePath(file);
    var data = { f: file, t: content };
    if (typeof updatePath !== "undefined") {
        if (Array.isArray(updatePath)) updatePath = updatePath.join('.');
        data.a = updatePath;
        if (typeof key !== "undefined") data.k = key;
    }
    return new Promise(function (resolve, reject) {
        $.ajax({
            type: 'POST',
            url: io.url.write,
            data: data,
            success: function (data) {
                data = data.split("\n");
                if (data[0] == "ok") resolve(data[1]);
                else reject(new Error((data[0] == "error" ? "PHP error:\n" : "") + data.slice(1).join("\n")));
            },
            cache: false,
            error: function (j, t, e) {
                reject(e);
                //throw new Error("Error writing file '" + file + "'\n" + JSON.stringify(j) + " " + e);
            }
        });
    });
};
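For context, a hypothetical call appending an entry to the array at data.log inside path/to/file.json would look like this (file name and path are made up for illustration):

// Hypothetical usage: append newEntry to the array at data.log in the stored JSON
io.write('path/to/file.json', newEntry, ['data', 'log'])
    .then(function (msg) { console.log('saved:', msg); })
    .catch(function (err) { console.error(err); });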
On the server, a PHP script manages the rest like this:
receives the data and checks if it's valid
checks if the given file path is writable
    if the file exists and is .json
        read it and decode the JSON
        return an error on invalid JSON
    if there is no update path given
        just write the data
    if there is an update path given (a JS sketch of this step follows below)
        return an error if the update path in the JSON data can't be traversed (or the file didn't exist)
        update the data at the update path
write the pretty-printed JSON to the file
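For illustration, here is a minimal JavaScript mirror of that path-update step (a hypothetical helper, not the actual PHP server code): traverse the decoded object along the dot-separated path, then either set the given key there or append to the array found there:

// Hypothetical JS mirror of the server-side update step (the real code is PHP)
function applyUpdate(doc, updatePath, content, key) {
    var node = doc;
    var parts = updatePath.split('.');
    for (var i = 0; i < parts.length; i++) {
        if (node === null || typeof node !== 'object' || !(parts[i] in node)) {
            throw new Error("Can't traverse update path at '" + parts[i] + "'");
        }
        node = node[parts[i]];
    }
    if (typeof key !== 'undefined') node[key] = content; // write to the named field
    else node.push(content); // default: the path points at an array, append to it
    return doc;
}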
However, I'm not perfectly happy with it, and problems have kept coming up over the last few weeks.
My Questions
Generally: how would you approach this problem? Alternative suggestions, databases? Any libraries that could help?
Note: I would prefer solutions that just use PHP or some standard Apache stuff.
One problem was that sometimes multiple writes on the same file were triggered. To avoid this I used Promises client-side (wrapped, because I read that jQuery's deferred objects aren't Promises/A+ compliant), but I don't feel 100% sure it is working. Is there a (file) lock in PHP that works across multiple requests?
Every now and then the JSON files break, and it's not clear to me how to reproduce the problem. At the time they break, I don't have a history of what happened. Any general debugging strategies with a client/server saving/loading process like this?
I wrote a comet-enabled web server that does diffs on updates of JSON data structures, for exactly the same reason. The server keeps a few versions of a JSON document and serves clients that hold different versions of the document the updates they need to get to the most recent version of the data.
Maybe you could reuse some of my code, written in C++ and CoffeeScript: https://github.com/TorstenRobitzki/Sioux
If you have concurrent write accesses to your data structure, are you sure that whoever writes to the file has the right version of the file in mind when reading it?

Blockchain API to determine transaction confirmations

I am trying to identify whether a transaction in the bitcoin blockchain has been confirmed or not. I have accessed a JSON representation of the transaction from blockchain.info using this url: https://blockchain.info/tx/62f9419e56ac1b628840aaf52307867f9856d7a52b3c1d945a9938a3021cbf2c?show_adv=false&format=json
I cannot find anything in the response that indicates how many confirmations it has...
{"block_height":221580,"time":1361068368,"inputs":[{"prev_out":{"n":0,"value":100000000,"addr":"1NaPjDPGcfaVCBd3cTmy4zEPjRbDwzkW49","tx_index":53213157,"type":0}},{"prev_out":{"n":0,"value":100000,"addr":"1FDBdn8cseukiteu1myGQCfgYnncdMNpFk","tx_index":53252395,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"13QRi4W5bq3FWrNGrWGcF1dH4mSWD6Huun","tx_index":52575903,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"1MhEJx1BodWATGxoZ7az3GnmUQwx2adCG2","tx_index":53376409,"type":0}},{"prev_out":{"n":1,"value":90000000,"addr":"1FosGa87ZSjoagVu1j8djiJKzUeLkhhp6P","tx_index":53308634,"type":0}},{"prev_out":{"n":0,"value":200000,"addr":"1DZzEunCP1SxBsz2aZah2q9WAFuYSsDrq9","tx_index":53272656,"type":0}},{"prev_out":{"n":1,"value":98500000,"addr":"19q8NEgZKQcQMMx5z16JETbe1bx6StNZfj","tx_index":53506579,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"15LXjh36usUspAYsGnhURVEnPn86W7SPSu","tx_index":53532799,"type":0}},{"prev_out":{"n":1,"value":119000000,"addr":"1PNbeqfPgMjjL6sLdXqkNZyCSkGFHop3bz","tx_index":53492488,"type":0}},{"prev_out":{"n":1,"value":150000000,"addr":"153hqmnNqUM8RGWdLE12tj74aAyS9U2pe7","tx_index":53283295,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"149BGgDjaMyYfYnrja4asYtuUnpsBjobnH","tx_index":53440208,"type":0}},{"prev_out":{"n":167,"value":35000,"addr":"1F3eAsYGC45s2Q8XiE7ywGXMr8QLB8FTCD","tx_index":53578752,"type":0}},{"prev_out":{"n":862,"value":5000,"addr":"1CD4Dcy3yUiBejmQX1hKfJi1y5ysAX9RwZ","tx_index":53578752,"type":0}},{"prev_out":{"n":0,"value":60000000,"addr":"1Q7hDXko9U8MxoAZGmYk2se6tf8WFSQbUK","tx_index":53305081,"type":0}},{"prev_out":{"n":1,"value":98000000,"addr":"1RuMjWETvUPAUqfJKhZ4GBo5tKuszbDTA","tx_index":53521527,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"17rBkeKtc7APY5PQjbicBbucfaUA1PZSpm","tx_index":53511134,"type":0}},{"prev_out":{"n":0,"value":169216027,"addr":"1EBz5v7dJfBPJzSwivVQcY19eT5hUBxa8w","tx_index":53194652,"type":0}},{"prev_out":{"n":1,"value":80000000,"addr":"1KLn85reRxN1JZL1S3gD2Kp2x8LZ14rz6S","tx_index":53194567,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"1EppQ2h8Ddvp1vsoSb2DLJqJAws2DrYnv9","tx_index":53190665,"type":0}},{"prev_out":{"n":174,"value":2,"addr":"1CDDR1vZtZPWc48v4brHmka3tDpXbuT9wd","tx_index":53620404,"type":0}},{"prev_out":{"n":1,"value":100000000,"addr":"12zQxFPPh5rsUyakdZZyADj2N5bFRFZRcd","tx_index":53540021,"type":0}},{"prev_out":{"n":1,"value":801624197,"addr":"1JEjtpHB7aZJm3QSRp76qQqchFfs4TjDeE","tx_index":53526428,"type":0}},{"prev_out":{"n":1,"value":100000000,"addr":"1K1Sn9V775d7i94voiYLLUSaFNUQ9BVj9Q","tx_index":53430153,"type":0}},{"prev_out":{"n":1156,"value":1,"addr":"1CxXkpmJ9Nr4S9b3rKeKU5WWLXGp5nv553","tx_index":53619724,"type":0}},{"prev_out":{"n":0,"value":99950000,"addr":"1Hp5GdoX4oXmjUD6ZRKvXNJQCZp2sk712c","tx_index":53229930,"type":0}},{"prev_out":{"n":1,"value":98000000,"addr":"12aqif4GXBd17N6EFj4onrHLd8febY4n6j","tx_index":53160076,"type":0}},{"prev_out":{"n":0,"value":357452267,"addr":"146eveRJD2YnxvNBw4hHtZn8xR3LHHVxtH","tx_index":53651895,"type":0}},{"prev_out":{"n":1,"value":60000000,"addr":"1FZximueHPa9sqTZSg2Q4LAsg91dZaJK5D","tx_index":53640062,"type":0}},{"prev_out":{"n":1,"value":100000000,"addr":"12VL7U1BLf8kLrkN3sca9w5dGVhVAy1kvD","tx_index":53578503,"type":0}},{"prev_out":{"n":0,"value":67315003,"addr":"1BbYNxYAGJJJz6wP4pK5eHmwcieRSiPDZm","tx_index":53437082,"type":0}},{"prev_out":{"n":1,"value":100000000,"addr":"1DoTTLAs5VUm2QHBXeL34h2kSfYnqSpsCj","tx_index":53116751,"type":0}},{"prev_out":{"n":1,"value":195951000,"addr":"1Epje2MuDrckP4zVJmR
XyEu5jWby2MvgHy","tx_index":53599874,"type":0}},{"prev_out":{"n":25,"value":1,"addr":"1CdACYi1JQDGekGPPc8bd3vq5d5v6s2KKY","tx_index":53620401,"type":0}},{"prev_out":{"n":105,"value":2,"addr":"15dsKW8yotixATZdDomBkRJh7YvzJJ4z7X","tx_index":53620401,"type":0}},{"prev_out":{"n":1,"value":90000000,"addr":"17ra7TQoPSmrxvLXGhupexd3Dk9fnZLM8Q","tx_index":53675760,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"1Fm2j4k7XK8veWdeJaxDuZdujdQQh3mj9j","tx_index":53696368,"type":0}},{"prev_out":{"n":39,"value":1500,"addr":"1PB3DrsvvTMkxv7AoV5FdMAVbrnv1R9AvF","tx_index":53366964,"type":0}},{"prev_out":{"n":1,"value":70000000,"addr":"1HDxhL7H8thYC9RaLwACRoeTP6cjTERzkq","tx_index":53197639,"type":0}},{"prev_out":{"n":0,"value":60000000,"addr":"1Mzt5Y815fnf3rbCigx411bmUGqTiAMMMf","tx_index":53534495,"type":0}},{"prev_out":{"n":0,"value":3000000,"addr":"191cP1rSfJX9kATiujqbavKPHta8ryPbUk","tx_index":53397518,"type":0}},{"prev_out":{"n":0,"value":80000000,"addr":"1E7zhSRBexQYN98PxKZrnocZzb7yuCoobF","tx_index":53616202,"type":0}},{"prev_out":{"n":0,"value":60000000,"addr":"1DNGJSkn2jaBtzHpeu2EV8Za7GzLkYRKrk","tx_index":53211317,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"1FPHkZfftpfBVYg46sBAZmU7k4R6nMWjgm","tx_index":53714935,"type":0}},{"prev_out":{"n":1,"value":80000000,"addr":"1NJmpCAfoeZa8M8RoWCT1PAs85k4URKwuL","tx_index":53437066,"type":0}},{"prev_out":{"n":1,"value":320000000,"addr":"17MCmBPgv2SEKsmGf1o7X6qbK59C1Pnwr3","tx_index":53584050,"type":0}},{"prev_out":{"n":0,"value":100000000,"addr":"1KrtH7ceJthEfBFd8t9G4Vohj2myGB1eDj","tx_index":53212472,"type":0}},{"prev_out":{"n":1,"value":103000000,"addr":"13zZzqKR3XYPUKpLWvGkimSJbDMaJim9Ru","tx_index":53218032,"type":0}},{"prev_out":{"n":1,"value":92500000,"addr":"1G5EdCerj7Yryoc4tpmCrFe7rvkbLcjHtz","tx_index":53633538,"type":0}},{"prev_out":{"n":0,"value":97800000,"addr":"1K59Q1UJSULsHhs4Rv8PiakEhDK689jQSj","tx_index":53232395,"type":0}}],"vout_sz":2,"relayed_by":"184.71.200.221","hash":"62f9419e56ac1b628840aaf52307867f9856d7a52b3c1d945a9938a3021cbf2c","vin_sz":52,"tx_index":53744354,"ver":1,"out":[{"n":0,"value":1000000,"addr":"1cm8zPZqjfWs5MBg8yKxJwWvDAkqF4CVu","tx_index":53744354,"type":0},{"n":1,"value":5000000000,"addr":"1EGP5pSnttKRdAcPxdiTviSrjsyHEAnXhy","tx_index":53744354,"type":0}],"size":9439}
The concept of "confirmations" in Bitcoin essentially translates to how many blocks have been generated since the block containing the transaction. In other words, you'll have to issue another request to get the current network block count (http://blockchain.info/q/getblockcount), and the confirmation count then becomes current_block_count - transaction_block_height + 1. Note that block_height is only present in the response once the transaction has been included in a block (i.e. fresh, unconfirmed transactions may not include the element at all).
Here is sample Node.js code to determine the confirmation count of a transaction.
const axios = require('axios');

async function getConfirmations(transactionId) {
    try {
        const response = await axios.get(`https://blockchain.info/rawtx/${transactionId}`);
        const blockHeight = response.data.block_height;
        // Unconfirmed transactions carry no block_height yet
        if (blockHeight === undefined) return 0;
        const currentBlockHeight = await axios.get(`https://blockchain.info/latestblock`)
            .then(res => res.data.height);
        const confirmations = currentBlockHeight - blockHeight + 1;
        return confirmations;
    } catch (error) {
        console.error(error);
    }
}
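A quick usage sketch, using the transaction hash from the question:

// Prints the current confirmation count (0 while the transaction is unconfirmed)
getConfirmations('62f9419e56ac1b628840aaf52307867f9856d7a52b3c1d945a9938a3021cbf2c')
    .then(confirmations => console.log('confirmations:', confirmations));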