How do I download SVG/SVF for offline viewing with the Autodesk Forge APIs?

I am able to complete all the steps to upload Revit files, translate them, and load them in the viewer. I am now trying to download the translated SVG/SVF for offline viewing. I found a reference to the following endpoint and tested it out with this:
var request = require('request'); // missing from the original snippet

function download() {
  var uri = 'https://developer.api.autodesk.com/derivativeservice/v2/derivatives/<<urn>>';
  var authorizationHeader = 'Bearer <<token>>';
  request.get(
    {
      url: uri,
      headers: {
        'Authorization': authorizationHeader,
        'Accept-Encoding': 'gzip, deflate'
      }
    },
    function (error, response, body) {
      if (!error) {
        console.log(body);
      } else {
        console.log(error);
      }
    });
}
API returns:
{"diagnostic":"Derivative api only supports adsk.viewing & adsk.objects urn"}

The urn should be URL-encoded, not base64-encoded.
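For example, a minimal sketch (the derivative urn below is hypothetical; take the real one from your manifest):

// A derivative urn as reported in the manifest (hypothetical example)
var derivativeUrn = 'urn:adsk.viewing:fs.file:dXJuOmFkc2sub2JqZWN0cw/output/geometry.svf';
// URL-encode the whole urn before appending it to the endpoint; do not base64-encode it
var uri = 'https://developer.api.autodesk.com/derivativeservice/v2/derivatives/'
    + encodeURIComponent(derivativeUrn);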

There are several steps involved if you want to grab all the required files for offline viewing. Start by checking the downloadBubble method (Node.js) in the extract project:
this.downloadBubble = function (urn, outPath) {
  var self = this ;
  self._outPath = outPath ;
  return (new Promise (function (fulfill, reject) {
    self._progress.msg = 'Downloading manifest' ;
    self.getManifest (urn)
      .then (function (bubble) {
        //utils.writeFile (outPath + 'bubble.json', bubble) ;
        self._progress.msg = 'Listing all derivative files' ;
        self.listAllDerivativeFiles (bubble.body, function (error, result) {
          self._progress._filesToFetch = result.list.length ;
          console.log ('Number of files to fetch:', self._progress._filesToFetch) ;
          self._progress._estimatedSize = 0 | (result.totalSize / (1024 * 1024)) ;
          console.log ('Estimated download size:', self._progress._estimatedSize, 'MB') ;
          //self.fixFlatBubbles (result) ;
          //self.fixFusionBubbles (result) ;
          self._progress.msg = 'Downloading derivative files' ;
          self.downloadAllDerivativeFiles (result.list, self._outPath, function (failed, succeeded) {
            //if ( ++self._done == 1 /*2*/ )
            //  return ;
            self.failed = failed ;
            self.succeeded = succeeded ;
            fulfill (self) ;
          }) ;
        }) ;
      })
      .catch (function (err) {
        console.error ('Error:', err.message) ;
        self._errors.push (err.message) ;
        reject (self) ;
      }) ;
  })) ;
} ;
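A minimal usage sketch, assuming an object that exposes downloadBubble together with the getManifest, listAllDerivativeFiles and downloadAllDerivativeFiles helpers from the extract project (the Extractor name and the urn are hypothetical):

var extractor = new Extractor() ; // hypothetical wrapper from the extract project
extractor.downloadBubble('dXJuOmFkc2sub2JqZWN0cy...', './bubble/') // base64 design urn
  .then (function (self) {
    console.log ('Done - failed:', self.failed, 'succeeded:', self.succeeded) ;
  })
  .catch (function (self) {
    console.error ('Extraction errors:', self._errors) ;
  }) ;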
Test it live at https://extract.autodesk.io

Related

Resize all existing images stored in Firebase Storage and update the newly resized image URL in the database via an API call [duplicate]

I have a requirement to resize both new and existing images stored in Firebase Storage. For new images, I enabled Firebase's Resize Images extension. For existing images, how can I resize each image and get the newly resized image URL to update back to the database via an API?
Here is my Firebase function that gets the existing image URLs from the database. My question is how to resize each image and get the new image URL?
const functions = require("firebase-functions");
const axios = require("axios");

async function getAlbums() {
  const endpoint = "https://api.mydomain.com/graphql";
  const headers = {
    "content-type": "application/json",
  };
  const graphqlQuery = {
    "query": `query Albums {
      albums {
        id
        album_cover
      }
    }`
  };
  functions.logger.info("Call API");
  const response = await axios({
    url: endpoint,
    method: 'post',
    headers: headers,
    data: graphqlQuery
  });
  if (response.errors) {
    functions.logger.info("API ERROR : ", response.errors); // errors if any
  } else {
    return response.data.data.albums;
  }
}

exports.manualGenerateResizedImage = functions.https.onRequest(async () => {
  const albums = await getAlbums();
  functions.logger.info("No. of Album : ", albums.length);
});
I think the below answer from Renaud Tarnec will definitely help you.
If you look at the code of the "Resize Images" extension, you will see that the Cloud Function that underlies the extension is triggered by an onFinalize event, which means:
When a new object (or a new generation of an existing object) is successfully created in the bucket. This includes copying or rewriting an existing object.
So, without rewriting/regenerating the existing images the Extension will not be triggered.
However, you could easily write your own Cloud Function that does the same thing but is triggered, for example, by a call to a specific URL (HTTPS cloud Function) or by creating a new document in a temporary Firestore Collection (background triggered CF).
This Cloud Function would execute the following steps:
1. Get all the files of your bucket; see the getFiles() method of the Google Cloud Storage Node.js Client API. This method returns a GetFilesResponse object which is an Array of File instances.
2. By looping over the array, for each file, check if the file has a corresponding resized image in the bucket (depending on the way you configured the Extension, the resized images may be in a specific folder).
3. If a file does not have a corresponding resized image, execute the same business logic of the Extension Cloud Function for this File.
There is an official Cloud Function sample which shows how to create a Cloud Storage triggered Firebase Function that creates resized thumbnails from uploaded images and saves their URLs to the database (see the last lines of the index.js file).
Note: If you have a lot of files to treat, you should most probably work in batches, since there is a 9-minute limit on Cloud Function execution. Also, depending on the number of images to treat, you may need to increase the timeout value and/or the allocated memory of your Cloud Function; see https://firebase.google.com/docs/functions/manage-functions#set_timeout_and_memory_allocation
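A minimal sketch of steps 1 and 2 above (the thumbs/ prefix and the _200x200 suffix are assumptions; use whatever naming you configured in the Extension):

const { Storage } = require("@google-cloud/storage");
const bucket = new Storage().bucket("projectid.appspot.com"); // your bucket name

async function findFilesWithoutThumbnail() {
  // Step 1: list all files under the images prefix
  const [files] = await bucket.getFiles({ prefix: "images/albums" });
  const missing = [];
  for (const file of files) {
    // Step 2: check for a corresponding resized image (assumed Extension config:
    // thumbnails under thumbs/ with a _200x200 suffix)
    const thumbPath = "thumbs/" + file.name.replace(/(\.[^.]+)$/, "_200x200$1");
    const [exists] = await bucket.file(thumbPath).exists();
    if (!exists) missing.push(file.name);
  }
  return missing; // these files still need the resize logic of step 3
}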
In case someone needs it, this is how I resized the existing images.
const functions = require("firebase-functions");
const axios = require("axios");
const { Storage } = require("@google-cloud/storage");
const storage = new Storage();
// Don't forget to replace with your bucket name
const bucket = storage.bucket("projectid.appspot.com");

async function getAlbums() {
  const endpoint = "https://api.mydomain.com/graphql";
  const headers = {
    "content-type": "application/json",
  };
  const graphqlQuery = {
    query: `query Albums {
      albums {
        id
        album_cover
      }
    }`,
  };
  const response = await axios({
    url: endpoint,
    method: "post",
    headers: headers,
    data: graphqlQuery,
  });
  if (response.errors) {
    functions.logger.error("API ERROR : ", response.errors); // errors if any
  } else {
    return response.data.data.albums;
  }
}
function getFileName(url) {
  var decodeURI = decodeURIComponent(url);
  var index = decodeURI.lastIndexOf("/") + 1;
  var filenameWithParam = decodeURI.substr(index);
  index = filenameWithParam.lastIndexOf("?");
  var filename = filenameWithParam.substr(0, index);
  return filename;
}

function getFileNameFromFirestore(url) {
  var index = url.lastIndexOf("/") + 1;
  var filename = url.substr(index);
  return filename;
}

const triggerBucketEvent = async () => {
  bucket.getFiles(
    {
      prefix: "images/albums", // you can add a path prefix
      autoPaginate: false,
    },
    async (err, files) => {
      if (err) {
        functions.logger.error(err);
        return;
      }
      const albums = await getAlbums();
      await Promise.all(
        files.map((file) => {
          var fileName = getFileNameFromFirestore(file.name);
          var result = albums.find((obj) => {
            return getFileName(obj.album_cover) === fileName;
          });
          if (result) {
            var file_ext = fileName.substr(
              (Math.max(0, fileName.lastIndexOf(".")) || Infinity) + 1
            );
            var newFileName = result.id + "." + file_ext;
            // Copy each file within the same directory under a new name;
            // the copy re-triggers the resize extension for that file
            file.copy("images/albums/" + newFileName);
          } else {
            functions.logger.info(file.name, " not found in album list!");
          }
        })
      );
    }
  );
};

exports.manualGenerateResizedImage = functions.https.onRequest(async () => {
  await triggerBucketEvent();
});

AWS S3 bucket image upload in Angular 6: problem with the return value

I am using this approach to upload images to an AWS S3 bucket:
https://grokonez.com/aws/angular-4-amazon-s3-example-how-to-upload-file-to-s3-bucket
This works fine as an individual task, but I rely on the result, which arrives a bit later, presumably due to the async behavior. I would like the next task to be executed only after the upload is confirmed.
upload() {
  let file: any;
  // let urltype = '';
  let filename = '';
  // let b: boolean;
  for (let i = 0; i < this.urls.length; i++) {
    file = this.selectedFiles[i];
    // urltype = this.urltype[i];
    filename = file.name;
    const k = uuid() + '.' + filename.substr((filename.lastIndexOf('.') + 1));
    this.uploadservice.uploadfile(file, k);
    console.log('done');
    // console.log('file: ' + file + ' : ' + filename);
    // let x = this.userservice.PostImage('test', file);
    // console.log('value of ' + x);
  }
  // return b;
}
The file upload service:
bucket.upload(params, function (err, data) {
    if (err) {
      console.log('There was an error uploading your file: ', err);
      return false;
    }
    console.log('Successfully uploaded file.', data);
    return true;
  }).promise();
} // closes the enclosing service method (rest elided in the question)
Here, 'done' is logged before the file upload has finished.
I think you should check out a tutorial on asynchronous programming and play around with a couple of examples using simple timeouts to get the hang of it, then proceed to more complex things like S3 and AWS.
Here is how I suggest you start your journey:
1) Learn the basic concepts of asynchronous programming using pure JS
https://eloquentjavascript.net/11_async.html
2) Play around with your own examples using callbacks and timeouts
3) Replace the callbacks with Promises
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises
4) Do it the "angular" way with rxjs Observables (similar to JS Observable)
http://reactivex.io/rxjs/class/es6/Observable.js~Observable.html
PS: To be more concrete:
Your code fails because the following line executes in an asynchronous manner, so the code calls your uploadfile function and immediately continues without waiting:
this.uploadservice.uploadfile(file, k);
Once you follow all the points I described above you will be able to do something like this (using a Promise):
this.uploadservice.uploadfile(file, k)
.then( result => {
console.log('Upload finished');
})
.catch(error => {
console.log('Something went wrong');
});
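For that chain to work, uploadfile itself has to return the promise from the SDK call. A minimal sketch of the service method, assuming the AWS SDK for JavaScript v2 used in the linked tutorial (the bucket name is a placeholder):

uploadfile(file, key) {
  const params = { Bucket: 'your-bucket-name', Key: key, Body: file };
  // Returning the promise lets callers chain .then()/.catch() on the upload
  return this.bucket.upload(params).promise();
}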

How to dynamically read external JSON files in Node.js?

I am creating a website that reads externally hosted JSON files and then uses Node.js to populate the site's content.
Just to demonstrate what I'm after, this is a really simplified version of what I'm trying to do in Node.js:
var ids = [111, 222, 333];
ids.forEach(function (id) {
    var json = getJSONsomehow('http://www.website.com/' + id + '.json');
    buildPageContent(json);
});
Is what I want to do possible?
(Marked as a duplicate of "How do I return the response from an asynchronous call?" see my comment below for my rebuttal)
You are trying to get it synchronously. What you should aim for instead is not a function used like this:
var json = getJSONsomehow('http://www.website.com/'+id+'.json');
but more like this:
getJSONsomehow('http://www.website.com/' + id + '.json', function (err, json) {
    if (err) {
        // error
    } else {
        // your json can be used here
    }
});
or like this:
getJSONsomehow('http://www.website.com/' + id + '.json')
    .then(function (json) {
        // you can use your json here
    })
    .catch(function (err) {
        // error
    });
You can use the request module to get your data with something like this:
var request = require('request');
var url = 'http://www.website.com/' + id + '.json';
request.get({url: url, json: true}, (err, res, data) => {
    if (err) {
        // handle error
    } else if (res.statusCode === 200) {
        // you can use data here - already parsed as json
    } else {
        // response other than 200 OK
    }
});
For a working example see this answer.
For more info see: https://www.npmjs.com/package/request
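Applied to the ids loop from the question, a minimal sketch (getJSON is an illustrative wrapper around the request call above; buildPageContent is assumed from the question):

var request = require('request');

function getJSON(url) {
    // Wrap request.get in a Promise so multiple requests can be combined
    return new Promise(function (resolve, reject) {
        request.get({url: url, json: true}, function (err, res, data) {
            if (err) reject(err);
            else if (res.statusCode === 200) resolve(data);
            else reject(new Error('Unexpected status ' + res.statusCode));
        });
    });
}

var ids = [111, 222, 333];
Promise.all(ids.map(function (id) {
    return getJSON('http://www.website.com/' + id + '.json');
})).then(function (jsons) {
    jsons.forEach(buildPageContent);
}).catch(function (err) {
    // handle error
});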
I think the problem is the asynchronous request: the function returns before the request has finished.
AJAX_req.open( "GET", url, true );
The third parameter specifies an asynchronous request. You should add a handler and do everything you need after the request finishes. For example:
function AJAX_JSON_Req( url ) {
    var AJAX_req = new XMLHttpRequest();
    AJAX_req.open( "GET", url, true );
    AJAX_req.setRequestHeader("Content-type", "application/json");
    AJAX_req.onreadystatechange = function() {
        if (AJAX_req.readyState == 4 && AJAX_req.status == 200) {
            console.log(AJAX_req.responseText);
        }
    };
    AJAX_req.send(); // without send() the request is never dispatched
}

AWS Lambda S3 function isn't called inside Alexa Skills Kit

I am trying to create a skill for Amazon Echo that will call a JSON file from AWS S3. When I call the basic S3 get function on its own it works, and the Amazon Alexa code works on its own.
But when I call them together, the S3 call gets skipped: for the following code, the console logs before and after s3.getObject() are printed, but the one in the middle is skipped. I do not understand why.
I also checked whether s3 was being called, and it is.
let aws = require('aws-sdk');
let s3 = new aws.S3({ apiVersion: '2006-03-01' });

function callS3() {
    console.log('loading S3 function');
    var myData = [];
    const params = {
        Bucket: 'cvo-echo',
        Key: 'data.json'
    };
    console.log("trying to get s3");
    s3.getObject(params, (err, data) => {
        if (err) {
            console.log('error in s3 get: \n' + err);
            //const message = `Error getting object ${key} from bucket ${bucket}.
            // Make sure they exist and your bucket is in same region as this function.
            //console.log(message);
        } else {
            console.log('CONTENT TYPE: ', data.ContentType);
            console.log('Data body: \n' + data.Body.toString());
            myData = JSON.parse(data.Body.toString());
            console.log('myData.length = ' + myData.length);
        }
        console.log('myData >> ' + myData);
    });
    console.log('finished callS3() func');
    return myData; // returns before the getObject callback has run
}
This might be a control flow issue. I've worked with Amazon's SDK before and ran into similar issues. Try using the async library in your code to get better control of what happens when, so that methods won't get skipped.
UPDATE: adding some code examples of what you could do.
function callS3(callback) {
    console.log('loading S3 function');
    var myData = [];
    const params = {
        Bucket: 'cvo-echo',
        Key: 'data.json'
    };
    console.log("trying to get s3");
    s3.getObject(params, (err, data) => {
        if (err) {
            console.log('error in s3 get: \n' + err);
            //const message = `Error getting object ${key} from bucket ${bucket}.
            // Make sure they exist and your bucket is in same region as this function.
            //console.log(message);
            callback(err, null); // callback with the error.
        } else {
            console.log('CONTENT TYPE: ', data.ContentType);
            console.log('Data body: \n' + data.Body.toString());
            myData = JSON.parse(data.Body.toString());
            console.log('myData.length = ' + myData.length);
            console.log('myData >> ' + myData);
            console.log('finished callS3() func');
            // Invoke the callback inside of the S3 call so this function doesn't "return" until the S3 call completes.
            callback(null, myData); // first element is an error and second is your data; the first is null if no error occurred.
        }
    });
}
/*
  This MIGHT work without async, but just in case you can read more about
  async.waterfall, where each function passes its values down to the next one.
*/
const async = require('async'); // npm install async

async.waterfall([
    callS3 // pass the function reference (no parentheses); you can include more functions here, and the callback values of each will be available to the next.
    //myNextFunction
], function (err, myData) {
    // you can use myData here.
});
It's a timing issue. Here is an example of loading a JSON file from an S3 share when a session is started.
var AWS = require('aws-sdk'); // available by default in the Lambda runtime

function onLaunch(launchRequest, session, callback) {
    var sBucket = "your-bucket-name";
    var sFile = "data.json";
    var params = { Bucket: sBucket, Key: sFile };
    var s3 = new AWS.S3();
    s3.getObject(params, function (err, data) {
        if (!err) {
            var json = JSON.parse(new Buffer(data.Body).toString("utf8"));
            for (var i = 0; i < json.length; i++) {
                console.log("name:" + json[i].name + ", age:" + json[i].age);
            }
            // Respond only after the JSON has been loaded
            getWelcomeResponse(callback);
        } else {
            console.log(err.toString());
        }
    });
}

WebRTC: Connecting multiple listeners to one client, one at a time

I have been trying to get WebRTC to work with a broadcaster and multiple listeners, but I am stuck when it comes to transferring descriptions and candidates via signalling (with Node.js & socket.io).
I can get the process working between two browsers with a simple Node.js socket app which simply broadcasts the descriptions and candidates to the other already-connected clients, but when I attempt to store a description and connect with a newly opened browser, nothing happens.
What I basically need to understand is: what do I need to provide to one browser in order for it to begin communicating with another? The project I am working on requires the ability for listeners to join rooms, authenticate, and begin listening to whatever media is being sent.
Below is my client side code:
var audioContext = new webkitAudioContext()
var client = null
var configuration =
{
  'iceServers':
  [{
    'url': 'stun:stun.example.org'
  }]
}

$(function ()
{
  window.RTCPeerConnection = window.RTCPeerConnection || window.webkitRTCPeerConnection || window.mozRTCPeerConnection
  client = new RTCPeerConnection(configuration, { optional: [{ RtpDataChannels: true }] })

  client.onnegotiationneeded = function ()
  {
    console.log('Negotiation needed')
    createOffer()
  }

  client.onicecandidate = function (event)
  {
    console.log('onicecandidate')
    socket.emit('candidate', JSON.stringify({ 'candidate': event.candidate }))
  }

  client.onaddstream = function (event)
  {
    console.log('onaddstream')
    $('#player').attr('src', URL.createObjectURL(event.stream))
    player.play()
  }

  socket.on('candidate', function (event)
  {
    candidate(event)
  })

  socket.on('description', function (message)
  {
    if (!client) { return }
    client.setRemoteDescription(new RTCSessionDescription(message.sdp), function ()
    {
      if (client.remoteDescription.type == 'offer')
        client.createAnswer(function (description)
        {
          client.setLocalDescription(description, function ()
          {
            socket.emit('description', JSON.stringify({ 'sdp': client.localDescription }))
          })
        }, function (err)
        {
          console.log('error: ' + err)
        })
    }, function (err)
    {
      console.log('error: ' + err)
    })
  })

  addStream()
})

function createOffer ()
{
  if (!client) { return }
  client.createOffer(function (description)
  {
    console.log(description)
    client.setLocalDescription(description, function ()
    {
      socket.emit('description', JSON.stringify({ 'sdp': client.localDescription }))
      console.log('set local description')
    })
  })
}

function candidate (message)
{
  if (message.candidate)
  {
    console.log('candidate')
    client.addIceCandidate(new RTCIceCandidate(message.candidate))
  }
}

function addStream ()
{
  navigator.webkitGetUserMedia({ audio: true, video: false }, function (stream)
  {
    client.addStream(stream)
  })
}
And my signalling part of my server as it currently stands:
io.on 'connection', (socket) ->
  socket.on 'description', (data) ->
    parsed = JSON.parse data
    socket.broadcast.emit 'description', parsed
  socket.on 'candidate', (candidate) ->
    parsed = JSON.parse candidate
    socket.broadcast.emit 'candidate', parsed
I'd appreciate any insight into this. Thanks.
The "PeerConnection" as the name indicates can be used with only one other peer. You cannot cache the offer SDP generated by one PeerConnection instance to use it with more than one other peers.
In your case, you must create a PeerConnection for each browser that you want to send/receive audio and video from and then exchange the corresponding SDP offer and answers with those browsers via your signaling mechanism.
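A minimal sketch of the broadcaster side, assuming the signalling server tags each message with the originating socket id (the peerId field and the peers map are illustrative, not part of your current code):

var peers = {} // one RTCPeerConnection per remote listener

function getPeer (peerId)
{
  if (!peers[peerId])
  {
    var pc = new RTCPeerConnection(configuration)
    pc.onicecandidate = function (event)
    {
      // Route candidates to the specific listener instead of broadcasting
      socket.emit('candidate', JSON.stringify({ 'peerId': peerId, 'candidate': event.candidate }))
    }
    peers[peerId] = pc
  }
  return peers[peerId]
}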
Please feel free to go through some of the links I have mentioned here to understand how WebRTC works.