Are disconnected workflows available with the Viewer v7 API? - autodesk-forge

I am trying to use the workflows from https://aps.autodesk.com/blog/disconnected-workflows .
I use .NET Core and it works with Viewer v6, but when I try v7, the console shows this error:
GET https://cdn.derivative.autodesk.com/derivativeservice/v2/manifest/dXJuOmFkc2sub2JqZWN0czpvcy5vYmplY3Q6dGVzdF8wMTA2L3JhY2Jhc2ljc2FtcGxlcHJvamVjdC5ydnQ?domain=http%3A%2F%2Flocalhost%3A3060 net::ERR_INTERNET_DISCONNECTED
The error occurs when I take the website offline with the service worker and try to load a model I have already cached.
How can I fix this, or is there another way to get the same result?
I am not sure whether the problem is in the part where I load the document.
I use this code:
export function loadModel_cache(urn) {
  let viewerError = document.getElementById("viewer-error");
  if (viewerError) {
    viewerError.parentNode.removeChild(viewerError);
  }
  function onDocumentLoadFailure() {
    viewerError = document.createElement("div");
    viewerError.id = "viewer-error";
    viewerError.innerHTML =
      "<span>Could not load document. Are you offline?</span>";
    document.getElementById("preview").appendChild(viewerError);
    updateOverlay();
    console.error("Could not load document " + urn);
  }
  function onItemLoadSuccess(viewer) {}
  function onItemLoadFailure() {
    updateOverlay();
    console.error("Could not load model from document " + urn);
  }
  currentUrn = null;
  const status = document.querySelector(
    `#overlay > ul > li[data-urn="${urn}"] > .model-status`
  );
  status.style.setProperty("display", "inline");
  status.innerHTML = "(loading...)";
  Autodesk.Viewing.Document.load(
    "urn:" + urn,
    (doc) => {
      const viewables = doc.getRoot().search({ type: "geometry" });
      console.log(viewables);
      console.log(doc);
      currentApp.loadDocumentNode(doc, viewables[0]).then(() => {
        onItemLoadSuccess();
        currentUrn = urn;
        updateOverlay();
      }); // Assuming there's always at least one viewable
    },
    onDocumentLoadFailure
  );
}
It replaces this function from the original source code:
function loadModel(urn) {
  let viewerError = document.getElementById("viewer-error");
  if (viewerError) {
    viewerError.parentNode.removeChild(viewerError);
  }
  function onDocumentLoadSuccess() {
    const viewables = currentApp.bubble.search({ type: "geometry" });
    console.log(viewables);
    currentApp.selectItem(
      viewables[0].data,
      onItemLoadSuccess,
      onItemLoadFailure
    ); // Assuming there's always at least one viewable
  }
  function onDocumentLoadFailure() {
    viewerError = document.createElement("div");
    viewerError.id = "viewer-error";
    viewerError.innerHTML =
      "<span>Could not load document. Are you offline?</span>";
    document.getElementById("viewer").appendChild(viewerError);
    updateOverlay();
    console.error("Could not load document " + urn);
  }
  function onItemLoadSuccess(viewer) {
    viewer.addEventListener(
      Autodesk.Viewing.GEOMETRY_LOADED_EVENT,
      function () {
        currentUrn = urn;
        updateOverlay();
      }
    );
  }
  function onItemLoadFailure() {
    updateOverlay();
    console.error("Could not load model from document " + urn);
  }
  currentUrn = null;
  const status = document.querySelector(
    `#overlay > ul > li[data-urn="${urn}"] > .model-status`
  );
  status.style.setProperty("display", "inline");
  status.innerHTML = "(loading...)";
  console.log(currentApp); //---------------------------------
  currentApp.loadDocument(
    "urn:" + urn,
    onDocumentLoadSuccess,
    onDocumentLoadFailure
  );
}

Newer versions of the viewer may be talking to different backend services, so enabling the "disconnected workflows" there should be possible; you would just need to update the URLs that the service worker caches.
Currently the service worker code assumes that all the derivatives are coming from https://developer.api.autodesk.com/derivativeservice/v2, and as you found out already, newer versions of the viewer are actually pulling the data from https://cdn.derivative.autodesk.com/derivativeservice/v2.
Also, be careful about the SVF vs SVF2 format difference. You will only be able to cache SVF data, not SVF2.
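For example, the URL matching in the service worker could be extended along these lines. This is a minimal sketch assuming a cache-first strategy; the cache name and the exact handler structure are placeholders, so adapt them to the actual service worker code from the blog sample:
// Minimal sketch: treat both the old and the new derivative endpoints as cacheable.
const DERIVATIVE_ENDPOINTS = [
  'https://developer.api.autodesk.com/derivativeservice/v2',
  'https://cdn.derivative.autodesk.com/derivativeservice/v2'
];

self.addEventListener('fetch', (event) => {
  const cacheable = DERIVATIVE_ENDPOINTS.some((prefix) => event.request.url.startsWith(prefix));
  if (!cacheable) {
    return; // let non-derivative requests pass through untouched
  }
  // Cache-first: serve cached derivatives when offline, otherwise
  // fetch from the network and store a copy for later.
  event.respondWith(
    caches.match(event.request).then((cached) => {
      if (cached) {
        return cached;
      }
      return fetch(event.request).then((response) => {
        const copy = response.clone();
        caches.open('derivatives-cache').then((cache) => cache.put(event.request, copy));
        return response;
      });
    })
  );
});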


Resize all existing images stored in Firebase Storage and update the newly resized image URL to the database via API call [duplicate]

I have a requirement to resize new and existing images stored in Firebase Storage. For new images, I enabled Firebase's Resize Images extension. For existing images, how can I resize them and get the new image URLs to update back to the database via the API?
Here is my Firebase function that gets the existing image URLs from the database. My question is how to resize the images and get the new image URLs.
const functions = require("firebase-functions");
const axios = require("axios");

async function getAlbums() {
  const endpoint = "https://api.mydomain.com/graphql";
  const headers = {
    "content-type": "application/json",
  };
  const graphqlQuery = {
    query: `query Albums {
      albums {
        id
        album_cover
      }
    }`,
  };
  functions.logger.info("Call API");
  const response = await axios({
    url: endpoint,
    method: "post",
    headers: headers,
    data: graphqlQuery,
  });
  if (response.errors) {
    functions.logger.info("API ERROR : ", response.errors); // errors if any
  } else {
    return response.data.data.albums;
  }
}

exports.manualGenerateResizedImage = functions.https.onRequest(async () => {
  const albums = await getAlbums();
  functions.logger.info("No. of Album : ", albums.length);
});
I think the answer below from Renaud Tarnec will definitely help you.
If you look at the code of the "Resize Images" extension, you will see that the Cloud Function underlying the extension is triggered by an onFinalize event, which fires:
When a new object (or a new generation of an existing object) is successfully created in the bucket. This includes copying or rewriting an existing object.
So, without rewriting/regenerating the existing images, the extension will not be triggered.
However, you could easily write your own Cloud Function that does the same thing but is triggered, for example, by a call to a specific URL (HTTPS Cloud Function) or by creating a new document in a temporary Firestore collection (background-triggered CF).
This Cloud Function would execute the following steps (a rough sketch of the first two steps follows the notes below):
1. Get all the files of your bucket; see the getFiles() method of the Google Cloud Storage Node.js Client API. This method returns a GetFilesResponse object, which is an array of File instances.
2. Looping over the array, check for each file whether it has a corresponding resized image in the bucket (depending on how you configured the extension, the resized images may be in a specific folder).
3. If a file does not have a corresponding resized image, execute the same business logic as the extension's Cloud Function for this File.
There is an official Cloud Function sample which shows how to create a Cloud Storage-triggered Firebase Function that creates resized thumbnails from uploaded images and saves their URLs to the database (see the last lines of the index.js file).
Note: if you have a lot of files to process, you should most probably work in batches, since there is a limit of 9 minutes for Cloud Function execution. Also, depending on the number of images to process, you may need to increase the timeout value and/or the allocated memory of your Cloud Function; see https://firebase.google.com/docs/functions/manage-functions#set_timeout_and_memory_allocation
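A rough sketch of the first two steps could look like this; the images/ prefix and the _200x200 thumbnail suffix are assumptions, so match them to how the extension is actually configured:
const { Storage } = require("@google-cloud/storage");
const storage = new Storage();
const bucket = storage.bucket("projectid.appspot.com"); // your bucket name

// List all files under the prefix and keep only the originals that do
// not yet have a corresponding resized copy in the bucket.
async function findImagesWithoutThumbnails() {
  const [files] = await bucket.getFiles({ prefix: "images/" });
  const names = new Set(files.map((f) => f.name));
  return files.filter((file) => {
    if (file.name.includes("_200x200")) {
      return false; // this file is already a thumbnail
    }
    const thumbName = file.name.replace(/(\.[^./]+)$/, "_200x200$1");
    return !names.has(thumbName);
  });
}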
In case someone needs it, this is how I resized the existing images:
const functions = require("firebase-functions");
const axios = require("axios");
const { Storage } = require("@google-cloud/storage");

const storage = new Storage();
// Don't forget to replace with your bucket name
const bucket = storage.bucket("projectid.appspot.com");

async function getAlbums() {
  const endpoint = "https://api.mydomain.com/graphql";
  const headers = {
    "content-type": "application/json",
  };
  const graphqlQuery = {
    query: `query Albums {
      albums {
        id
        album_cover
      }
    }`,
  };
  const response = await axios({
    url: endpoint,
    method: "post",
    headers: headers,
    data: graphqlQuery,
  });
  if (response.errors) {
    functions.logger.error("API ERROR : ", response.errors); // errors if any
  } else {
    return response.data.data.albums;
  }
}

// Extract the file name from a download URL (strips the path and query string).
function getFileName(url) {
  var decodedURI = decodeURIComponent(url);
  var index = decodedURI.lastIndexOf("/") + 1;
  var filenameWithParam = decodedURI.substr(index);
  index = filenameWithParam.lastIndexOf("?");
  var filename = filenameWithParam.substr(0, index);
  return filename;
}

function getFileNameFromFirestore(url) {
  var index = url.lastIndexOf("/") + 1;
  var filename = url.substr(index);
  return filename;
}

const triggerBucketEvent = async () => {
  bucket.getFiles(
    {
      prefix: "images/albums", // you can add a path prefix
      autoPaginate: false,
    },
    async (err, files) => {
      if (err) {
        functions.logger.error(err);
        return;
      }
      const albums = await getAlbums();
      await Promise.all(
        files.map((file) => {
          var fileName = getFileNameFromFirestore(file.name);
          var result = albums.find((obj) => {
            return getFileName(obj.album_cover) === fileName;
          });
          if (result) {
            var file_ext = fileName.substr(
              (Math.max(0, fileName.lastIndexOf(".")) || Infinity) + 1
            );
            var newFileName = result.id + "." + file_ext;
            // Copy each file with a different name; the copy triggers the
            // Resize Images extension via its onFinalize event. Return the
            // promise so Promise.all actually waits for the copies.
            return file.copy("images/albums/" + newFileName);
          } else {
            functions.logger.info(file.name, " not found in album list!");
          }
        })
      );
    }
  );
};

exports.manualGenerateResizedImage = functions.https.onRequest(async () => {
  await triggerBucketEvent();
});

Forge API translating .fbx to SVF2 doesn't work and translates only to SVF

I am using the forge-apis package on Node.js and I want to translate a .fbx file to SVF2. When I do so and load the model, the size and GPU memory used are the same as a normal SVF translation, and when I check viewer.model.isSVF2() it returns false.
const {
  DerivativesApi,
  JobPayload,
  JobPayloadInput,
  JobPayloadOutput,
  JobSvfOutputPayload
} = require('forge-apis');
and
router.post('/jobs', async (req, res, next) => {
  const xAdsForce = (req.body.xAdsForce === true);
  let job = new JobPayload();
  job.input = new JobPayloadInput();
  job.input.urn = req.body.objectName;
  if (req.body.rootFilename && req.body.compressedUrn) {
    job.input.rootFilename = req.body.rootFilename;
    job.input.compressedUrn = req.body.compressedUrn;
  }
  job.output = new JobPayloadOutput([
    new JobSvfOutputPayload()
  ]);
  job.output.formats[0].type = 'svf2';
  job.output.formats[0].views = ['2d', '3d'];
  try {
    // Submit a translation job using [DerivativesApi](https://github.com/Autodesk-Forge/forge-api-nodejs-client/blob/master/docs/DerivativesApi.md#translate).
    const result = await new DerivativesApi().translate(job, { xAdsForce: xAdsForce }, req.oauth_client, req.oauth_token);
    res.status(200).end();
  } catch (err) {
    next(err);
  }
});
How can I handle this problem? Thanks a lot.

Ionic 4: Recording Audio and Sending to Server (File has been corrupted)

I would like to record an audio file in a mobile application (iOS & Android) and transfer it to the server as FormData in Ionic 4. I have used "cordova-plugin-media" to capture the audio with the logic below:
if (this.platform.is('ios')) {
  this.filePaths = this.file.documentsDirectory;
  this.fileExtension = '.m4a';
} else if (this.platform.is('android')) {
  this.filePaths = this.file.externalDataDirectory;
  this.fileExtension = '.3gp';
}
this.fileName = 'recording' + new Date().getHours() + new Date().getMinutes() + new Date().getSeconds() + this.fileExtension;
if (this.filePaths) {
  this.file.createFile(this.filePaths, this.fileName, true).then((entry: FileEntry) => {
    this.audio = this.media.create(entry.toInternalURL());
    this.audio.startRecord();
  });
}
I have also tried creating the media directly, without the file creation step. I can record and play the audio, but when I try to send the file to the server with the logic below, it arrives corrupted, and the web application is unable to play the .m4a file.
Please correct me if I am doing anything wrong in my code.
Upload logic:
let formData: FormData = new FormData();
formData.append('recordID', feedbackID);
that.file.readAsDataURL(filePath, file.name).then((data) => {
  const audioBlob = new Blob([data], { type: file.type });
  formData.append('files', audioBlob, file.name);
  that.uploadFormData(formData, feedbackID); // POST logic
});
I have used the solution suggested by Ankush and it works fine: use readAsArrayBuffer instead of readAsDataURL. The .m4a format is supported on both iOS and Android, and I can also download the same file from the web application.
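For reference, the fix amounts to swapping one call in the upload logic above. This is only a sketch, reusing the variable names from the question (that, filePath, file, feedbackID):
const formData = new FormData();
formData.append('recordID', feedbackID);
// readAsArrayBuffer returns the raw bytes of the recording; wrapping the
// base64 string from readAsDataURL in a Blob is what corrupted the upload.
that.file.readAsArrayBuffer(filePath, file.name).then((buffer) => {
  const audioBlob = new Blob([buffer], { type: file.type });
  formData.append('files', audioBlob, file.name);
  that.uploadFormData(formData, feedbackID);
});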
I am using the code below to upload an image to the server. I assume that only a few modifications will be required to transfer the recorded media instead of an image file.
private uploadPicture(imagePath: string, apiUrl: string): Observable<ApiResponse<ImageUploadResponseModel>> {
  return this.convertFileFromFilePathToBlob(imagePath).pipe(
    switchMap(item => this.convertBlobToFormData(item)),
    switchMap(formData => this.postImageToServer(formData, apiUrl))
  );
}
The remaining functions used in the code above:
private postImageToServer(formData: FormData, apiUrl: string): Observable<ApiResponse<ImageUploadResponseModel>> {
  const requestHeaders = new HttpHeaders({ enctype: 'multipart/form-data' });
  return this.http.post<ApiResponse<ImageUploadResponseModel>>(apiUrl, formData, { headers: requestHeaders });
}

private convertBlobToFormData(blob: Blob): Observable<FormData> {
  return new Observable<FormData>(subscriber => {
    // A Blob() is almost a File() - it's just missing the two properties below which we will add
    // tslint:disable-next-line: no-string-literal
    blob['lastModifiedDate'] = new Date();
    // tslint:disable-next-line: no-string-literal
    blob['name'] = 'sample.jpeg';
    const formData = new FormData();
    formData.append('file', blob as Blob, 'sample.jpeg');
    subscriber.next(formData);
    subscriber.complete();
  });
}

private convertFileFromFilePathToBlob(imagePath: string): Observable<Blob> {
  return new Observable<Blob>(subscriber => {
    const directoryPath = imagePath.substr(0, imagePath.lastIndexOf('/'));
    let fileName = imagePath.split('/').pop();
    fileName = fileName.split('?')[0];
    this.file.readAsArrayBuffer(directoryPath, fileName).then(fileEntry => {
      const imgBlob: any = new Blob([fileEntry], { type: 'image/jpeg' });
      imgBlob.name = 'sample.jpeg';
      subscriber.next(imgBlob);
      subscriber.complete();
    }, () => {
      subscriber.error('Some error occurred while reading image from the filepath.');
    });
  });
}

Google Cloud Storage `predefinedAcl` and `file.makePublic()` not working

I'm trying to get permanent download URLs when I upload files to Firebase Storage (Google Cloud Storage) from Firebase Cloud Functions (Google Cloud Functions).
I tried setting predefinedAcl to authenticatedRead and to publicRead; both resulted in 403 (Forbidden) errors when my app tried to download the files. (predefinedAcl is from the documentation for CreateWriteStreamOptions.) Here's the code:
const {Storage} = require('@google-cloud/storage');
const storage = new Storage({ projectId: 'languagetwo-cd94d' });
const myBucket = storage.bucket('languagetwo-cd94d.appspot.com');

var mp3Promise = new Promise(function(resolve, reject) {
  let options = {
    metadata: {
      contentType: 'audio/mp3',
      public: true
    }
  };
  synthesizeParams.accept = 'audio/mp3';
  var file = myBucket.file('Audio/' + longLanguage + '/' + pronunciation + '/' + word + '.mp3');
  textToSpeech.synthesize(synthesizeParams)
    .then(function(audio) {
      audio.pipe(file.createWriteStream(options));
    })
    .then(function() {
      resolve('http://storage.googleapis.com/languagetwo-cd94d.appspot.com/Audio/' + longLanguage + '/' + pronunciation + '/' + word + '.mp3');
    })
    .catch(error => console.error(error));
});
That code executes without an error, writes the file to Storage, and passes the download URL to the next function. When I try to download the file with this URL:
http://storage.googleapis.com/languagetwo-cd94d.appspot.com/Audio/English/United_States-Allison-Female-IBM/catbirds.mp3
I get this error:
<Error>
  <Code>AccessDenied</Code>
  <Message>Access denied.</Message>
  <Details>
    Anonymous caller does not have storage.objects.get access to languagetwo-cd94d.appspot.com/Audio/English/United_States-Allison-Female-IBM/catbirds.mp3.
  </Details>
</Error>
When downloading the file from my app (as an authorized user), I get the same 403 (Forbidden) error message.
I've also tried this property, with the same result:
let options = {
  metadata: {
    contentType: 'audio/webm',
    predefinedAcl: 'publicRead'
  }
};
Moving on, I tried file.makePublic():
const {Storage} = require('@google-cloud/storage');
const storage = new Storage({ projectId: 'languagetwo-cd94d' });
const myBucket = storage.bucket('languagetwo-cd94d.appspot.com');

var mp3Promise = new Promise(function(resolve, reject) {
  let options = {
    metadata: {
      contentType: 'audio/mp3'
    }
  };
  synthesizeParams.accept = 'audio/mp3';
  var file = myBucket.file('Audio/' + longLanguage + '/' + pronunciation + '/' + word + '.mp3');
  textToSpeech.synthesize(synthesizeParams)
    .then(function(audio) {
      audio.pipe(file.createWriteStream(options));
    })
    .then(function() {
      file.makePublic()
        .then(function(data) {
          console.log(data);
          resolve('http://storage.googleapis.com/languagetwo-cd94d.appspot.com/Audio/' + longLanguage + '/' + pronunciation + '/' + word + '.mp3');
        })
        .catch(error => console.error(error));
    })
    .catch(error => console.error(error));
});
This code didn't even execute. It crashed at file.makePublic() with the error message:
{ Error: No such object: languagetwo-cd94d.appspot.com/Audio/English/United_States-Allison-Female-IBM/warblers.mp3
I'm not sure what this error means. My guess is that file.createWriteStream() wrote to a file location, and then file.makePublic() couldn't find that file location.
I checked the permissions for warblers.mp3 in the Cloud Storage Browser.
The first error is due to the access permissions on the bucket. The Identity and Access Management (IAM) documentation has instructions for checking the roles and permissions that manage bucket access.
The second error could be a consequence of the first one.
Please let me know if this information works for you.
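As an illustration, public read access can be granted at the IAM level with the Node.js client. This is only a sketch; granting roles/storage.objectViewer to allUsers makes every object in the bucket publicly readable, so use it with care:
const { Storage } = require('@google-cloud/storage');
const storage = new Storage({ projectId: 'languagetwo-cd94d' });
const bucket = storage.bucket('languagetwo-cd94d.appspot.com');

async function makeBucketPublic() {
  // Add allUsers as an object viewer in the bucket's IAM policy.
  const [policy] = await bucket.iam.getPolicy({ requestedPolicyVersion: 3 });
  policy.bindings.push({
    role: 'roles/storage.objectViewer',
    members: ['allUsers'],
  });
  await bucket.iam.setPolicy(policy);
}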
You need to change options to this:
let options = {
  public: true,
  metadata: {
    contentType: 'audio/mp3'
  }
};
https://googleapis.dev/nodejs/storage/latest/global.html#CreateWriteStreamOptions
I've been struggling with the same issue for a while and in the end here is what solved it for me:
const stream = file.createWriteStream(options);
audio.pipe(stream);
stream.on('finish', () => {
  // file APIs will now work
});
The problem is that audio.pipe(file.createWriteStream(options)) creates a stream that writes to the file asynchronously. At the moment when you call file.makePublic(), there is no guarantee that the write stream has completed.
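Putting the two pieces together, the write from the question could be restructured like this. This is a sketch built on the question's own variables (file, synthesizeParams, textToSpeech, resolve, reject); the key point is that makePublic() only runs after the 'finish' event:
synthesizeParams.accept = 'audio/mp3';
textToSpeech.synthesize(synthesizeParams)
  .then(function(audio) {
    const stream = file.createWriteStream({ metadata: { contentType: 'audio/mp3' } });
    stream.on('finish', function() {
      // The object now exists in the bucket, so ACL changes will succeed.
      file.makePublic()
        .then(function() {
          resolve('http://storage.googleapis.com/languagetwo-cd94d.appspot.com/Audio/' + longLanguage + '/' + pronunciation + '/' + word + '.mp3');
        })
        .catch(reject);
    });
    stream.on('error', reject);
    audio.pipe(stream);
  })
  .catch(reject);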

AWS Lambda S3 function isn't called inside Alexa Skills Kit

I am trying to create a skill for the Amazon Echo that reads a JSON file from AWS S3. When I run the basic S3 getObject code on its own it works, and the Alexa code works on its own.
But when I put them together, the S3 callback gets skipped: in the following code, the console logs before and after s3.getObject() both appear, but the one inside the callback does not. I do not understand why.
I also checked whether S3 was being called, and it is.
let aws = require('aws-sdk');
let s3 = new aws.S3({ apiVersion: '2006-03-01' });

function callS3() {
  console.log('loading S3 function');
  var myData = [];
  const params = {
    Bucket: 'cvo-echo',
    Key: 'data.json'
  };
  console.log("trying to get s3");
  s3.getObject(params, (err, data) => {
    if (err) {
      console.log('error in s3 get: \n' + err);
      //const message = `Error getting object ${key} from bucket ${bucket}.
      // Make sure they exist and your bucket is in same region as this function.
      //console.log(message);
    } else {
      console.log('CONTENT TYPE: ', data.ContentType);
      console.log('Data body: \n' + data.Body.toString());
      myData = JSON.parse(data.Body.toString());
      console.log('myData.length = ' + myData.length);
    }
    console.log('myData >> ' + myData);
  });
  console.log('finished callS3() func');
  return myData;
}
This might be a control flow issue; I've worked with Amazon's SDK before and ran into similar problems. Try implementing async control flow (callbacks or the async library) in your code to have better control over what happens when. That way, methods won't get skipped.
UPDATE: adding some code examples of what you could do.
function callS3(callback) {
  console.log('loading S3 function');
  var myData = [];
  const params = {
    Bucket: 'cvo-echo',
    Key: 'data.json'
  };
  console.log("trying to get s3");
  s3.getObject(params, (err, data) => {
    if (err) {
      console.log('error in s3 get: \n' + err);
      //const message = `Error getting object ${key} from bucket ${bucket}.
      // Make sure they exist and your bucket is in same region as this function.
      //console.log(message);
      callback(err, null); // pass the error to the callback.
    } else {
      console.log('CONTENT TYPE: ', data.ContentType);
      console.log('Data body: \n' + data.Body.toString());
      myData = JSON.parse(data.Body.toString());
      console.log('myData.length = ' + myData.length);
      console.log('myData >> ' + myData);
      console.log('finished callS3() func');
      // Invoke the callback inside the S3 handler so this function doesn't
      // "return" until the S3 call completes.
      callback(null, myData); // first argument is an error, second is your data; the first is null if no error occurred.
    }
  });
}

/*
This MIGHT work without async, but just in case you can read more about
async.waterfall, where each function passes its results down to the next.
*/
const async = require('async'); // npm install async

async.waterfall([
  callS3 // pass the function reference (not callS3()); you can include more functions here, and the callback values of each are available to the next.
  // myNextFunction
], function(err, myData) {
  // you can use myData here.
});
It's a timing issue. Here is an example of loading a JSON file from an S3 share when a session is started.
var AWS = require('aws-sdk');

function onLaunch(launchRequest, session, callback) {
  var sBucket = "your-bucket-name";
  var sFile = "data.json";
  var params = { Bucket: sBucket, Key: sFile };
  var s3 = new AWS.S3();
  s3.getObject(params, function(err, data) {
    if (!err) {
      var json = JSON.parse(Buffer.from(data.Body).toString("utf8"));
      for (var i = 0; i < json.length; i++) {
        console.log("name:" + json[i].name + ", age:" + json[i].age);
      }
      // Respond to Alexa only after the JSON has been loaded.
      getWelcomeResponse(callback);
    } else {
      console.log(err.toString());
    }
  });
}