Scraping <meta> content with Cheerio - google-apps-script

Tanaike helped me with this amazing script using Cheerio. The original was for Letterboxd, but this one is to pull in a watchlist from Trakt.tv (sample watchlist).
As it is right now it pulls in the watched date and the title, but I'd also like to pull in the content from the meta tag for each item.
<meta content="8 Million Ways to Die (1986)" itemprop="name">
I tried using $('[itemprop="name"]').attr('content'); but it doesn't accept the .attr piece.
Here is the full script as it is now, returning the watched date in Col1 and the title in Col2.
/**
 * Returns Trakt watchlist by username
 * @param pages enter the number of pages in the list. Default is 10
 * @customfunction
 */
function TRAKT(pages = 10) {
  const username = `jerrylaslow`;
  const maxPage = pages;
  const reqs = [...Array(maxPage)].map((_, i) => ({ url: `https://trakt.tv/users/` + username + `/history/all/added?page=${i + 1}`, muteHttpExceptions: true }));
  return UrlFetchApp.fetchAll(reqs).flatMap((r, i) => {
    if (r.getResponseCode() != 200) {
      return [["Values couldn't be retrieved.", reqs[i].url]];
    }
    const $ = Cheerio.load(r.getContentText());
    const ar = $(`a.titles-link > h3.ellipsify, h4 > span.format-date`).toArray();
    return [...Array(Math.ceil(ar.length / 2))].map((_) => {
      const temp = ar.splice(0, 2);
      const watchDate = Utilities.formatDate(new Date($(temp[1]).text().trim().replace(/T|Z/g, " ")), "GMT", "yyyy-MM-dd");
      const title = $(temp[0]).text().trim();
      return [watchDate, title];
    });
  });
}
The values can be pulled with this, so I know there isn't any sort of blocking in play.
=IMPORTXML(
  "https://trakt.tv/users/jerrylaslow/history",
  "//meta[@itemprop='name']/@content")
Any help is appreciated.

In order to achieve your goal, in your script, how about the following modification?
Modified script:
function TRAKT(pages = 10) {
  const username = `jerrylaslow`;
  const maxPage = pages;
  const reqs = [...Array(maxPage)].map((_, i) => ({ url: `https://trakt.tv/users/` + username + `/history/all/added?page=${i + 1}`, muteHttpExceptions: true }));
  return UrlFetchApp.fetchAll(reqs).flatMap((r, i) => {
    if (r.getResponseCode() != 200) {
      return [["Values couldn't be retrieved.", reqs[i].url]];
    }
    const $ = Cheerio.load(r.getContentText());
    const ar = $(`a.titles-link > h3.ellipsify, h4 > span.format-date`).toArray();
    return [...Array(Math.ceil(ar.length / 2))].map((_) => {
      const temp = ar.splice(0, 2);
      const c = $(temp[0]).parent('a').parent('div').parent('div').find('meta').toArray().find(ff => $(ff).attr("itemprop") == "name"); // Added
      const watchDate = Utilities.formatDate(new Date($(temp[1]).text().trim().replace(/T|Z/g, " ")), "GMT", "yyyy-MM-dd");
      const title = $(temp[0]).text().trim();
      return [watchDate, title, c ? $(c).attr("content") : ""]; // Modified
    });
  });
}
When this modified script is run, the value of content is put into the 3rd column. If you want to put it into another column, please modify return [watchDate, title, c ? $(c).attr("content") : ""];.
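As a side note, the three chained .parent() calls could probably be collapsed with Cheerio's .closest() and an attribute selector. This is an untested sketch: the '.grid-item' ancestor class is an assumption about Trakt's markup, not something verified above.

// Sketch (untested): walk up to the item's container and grab the meta tag.
// '.grid-item' is an assumed class name; inspect Trakt's HTML to confirm it.
const meta = $(temp[0]).closest('.grid-item').find('meta[itemprop="name"]').first();
return [watchDate, title, meta.length ? meta.attr("content") : ""];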

Related

How to edit a current Google script to only return details from a particular folder in a shared drive

I have a script (thank you Cooper) that I'm trying to use to generate a list of all folders and files within one particular folder in a shared drive; however, the script is returning the following error:
Exception: Argument too large: value
gff @ Folder Listing.gs:67
(anonymous) @ Folder Listing.gs:72
gff @ Folder Listing.gs:68
(anonymous) @ Folder Listing.gs:72
gff @ Folder Listing.gs:68
(anonymous) @ Folder Listing.gs:72
gff @ Folder Listing.gs:68
(anonymous) @ Folder Listing.gs:72
gff @ Folder Listing.gs:68
(anonymous) @ Folder Listing.gs:72
I'm assuming the problem is that the script is grabbing all folders and files within the shared drive, and that's far too large for the script to run, so I'm trying to reduce the source size by targeting a particular folder (or just returning the folder names and not worrying about files at all).
What can I change in this script to reduce the size issue?
function sharedDriveTrees() {
  const ss = SpreadsheetApp.openById("blah"); // need to change the ssid for the output spreadsheet
  const r = Drive.Drives.list();
  const drives = JSON.parse(r).items;
  const shts = ss.getSheets().filter((sh, i) => i < drives.length).filter(e => e);
  var trees = [];
  drives.forEach((obj, i) => {
    obj["title"] = obj.name;
    let ob = JSON.parse(Drive.Files.get(obj.id, { supportsAllDrives: true, supportsTeamDrives: true }));
    obj["alternateLink"] = ob.alternateLink;
    Logger.log('Drive Title: %s Time: %s', obj.title, Utilities.formatDate(new Date(), ss.getSpreadsheetTimeZone(), "HH:mm:ss"));
    shts[i].setName(`${obj.title}\n${Utilities.formatDate(new Date(), ss.getSpreadsheetTimeZone(), "yyyy.MM.dd HH:mm:ss")}`);
    let tree = [];
    CacheService.getScriptCache().put("tree", JSON.stringify(tree), 60);
    level = 1;
    gff(obj);
    tree = JSON.parse(CacheService.getScriptCache().get("tree"));
    let l = tree.reduce((a, c) => {
      if (c.length > a) {
        a = c.length;
      }
      return a;
    }, 0);
    tree.forEach((a, j) => {
      if ((l - a.length) > 0) {
        let arr = [...Array.from(new Array(l - a.length).keys(), x => "")];
        tree[j] = a.concat(arr);
      }
    });
    trees.push(tree);
    const sh = shts[i];
    sh.clearContents();
    sh.getRange(1, 1, tree.length, tree[0].length).setValues(tree);
    SpreadsheetApp.flush();
  });
}
level = 1;
function gff(fobj) {
  //Logger.log('Drive Title: %s', fobj.title);
  const r = Drive.Children.list(fobj.id);
  const fldrMime = "application/vnd.google-apps.folder";
  let tree = JSON.parse(CacheService.getScriptCache().get("tree"));
  let files = [];
  let subfolders = [];
  fobj["level"] = level;
  let children = JSON.parse(r).items;
  children.forEach((obj, i) => {
    let o = JSON.parse(Drive.Files.get(obj.id, { supportsAllDrives: true, supportsTeamDrives: true }));
    o["level"] = level;
    if (o.mimeType == fldrMime) {
      subfolders.push(o);
    } else {
      files.push(o);
    }
  });
  //Logger.log('level: %s', level);
  let arr1 = [...Array.from(new Array(level).keys(), x => { if (x < (level - 1)) { return '' } else { return `=HYPERLINK("${fobj.alternateLink}","${fobj.title}")` } })];
  //Logger.log('arr1: %s', JSON.stringify(arr1));
  tree.push(arr1);
  if (files && files.length > 0) {
    files.forEach(obj => {
      let arr2 = [...Array.from(new Array(level + 1).keys(), x => { if (x < (level)) { return '' } else { return `=HYPERLINK("${obj.alternateLink}","${obj.title}")` } })];
      //Logger.log('arr2: %s', JSON.stringify(arr2));
      tree.push(arr2);
    });
  }
  CacheService.getScriptCache().put("tree", JSON.stringify(tree), 60);
  subfolders.forEach(obj => {
    level++;
    obj.level = level;
    CacheService.getScriptCache().put("tree", JSON.stringify(tree), 60);
    gff(obj);
    tree = JSON.parse(CacheService.getScriptCache().get("tree"));
  });
  level--;
  return;
}
Edit:
After checking the affected line I found out that the issue is happening at CacheService.getScriptCache().put("tree", JSON.stringify(tree), 60). The Cache documentation explains that the limit for the second parameter (the value) is 100KB. The original script's creator was using the CacheService as a kind of global variable to save every iteration of the loop as it kept adding levels to the tree. Since your folder structure is pretty large, it grew beyond the 100KB limit.
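If you want to verify this yourself, here's a quick sketch (my addition, using only built-in Utilities calls) that logs the payload size before each put:

// Sketch: log the serialized tree's size; the per-value cache limit is 102,400 bytes.
const payload = JSON.stringify(tree);
Logger.log('Cache payload: %s bytes', Utilities.newBlob(payload).getBytes().length);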
As far as I can tell there's no way to raise this limit, so I rewrote a few lines to pass the object to the gff() function instead of using the cache.
I kept the hyperlinks, but I also added a couple of commented lines that you can switch to in order to return only folder names, as you requested. You can find them at let arr1 = and let arr2 =. You can switch between them to see if there's a performance improvement. You could also just change those lines in the original code that uses the cache, but you may still eventually run into the limit:
function folderTrees() {
  const ss = SpreadsheetApp.openById("<Your spreadsheet id>"); // need to change the ssid for the output spreadsheet
  const f = Drive.Files.get("<Folder ID>", { supportsAllDrives: true, supportsTeamDrives: true });
  const obj = JSON.parse(f);
  const sh = ss.getSheets()[0];
  var trees = [];
  Logger.log('Folder Title: %s Time: %s', obj.title, Utilities.formatDate(new Date(), ss.getSpreadsheetTimeZone(), "HH:mm:ss"));
  sh.setName(`${obj.title}\n${Utilities.formatDate(new Date(), ss.getSpreadsheetTimeZone(), "yyyy.MM.dd HH:mm:ss")}`);
  let tree = [];
  level = 1;
  tree = gff(obj, tree);
  let l = tree.reduce((a, c) => {
    if (c.length > a) {
      a = c.length;
    }
    return a;
  }, 0);
  tree.forEach((a, j) => {
    if ((l - a.length) > 0) {
      let arr = [...Array.from(new Array(l - a.length).keys(), x => "")];
      tree[j] = a.concat(arr);
    }
  });
  trees.push(tree);
  sh.clearContents();
  sh.getRange(1, 1, tree.length, tree[0].length).setValues(tree);
  SpreadsheetApp.flush();
}
level = 1;
function gff(fobj, treeobj) {
  const r = Drive.Children.list(fobj.id);
  const fldrMime = "application/vnd.google-apps.folder";
  let tree = treeobj;
  let files = [];
  let subfolders = [];
  fobj["level"] = level;
  let children = JSON.parse(r).items;
  children.forEach((obj, i) => {
    let o = JSON.parse(Drive.Files.get(obj.id, { supportsAllDrives: true, supportsTeamDrives: true }));
    o["level"] = level;
    if (o.mimeType == fldrMime) {
      subfolders.push(o);
    } else {
      files.push(o);
    }
  });
  //first line adds the hyperlinks and the second one returns only text
  let arr1 = [...Array.from(new Array(level).keys(), x => { if (x < (level - 1)) { return '' } else { return `=HYPERLINK("${fobj.alternateLink}","${fobj.title}")` } })];
  //let arr1 = [...Array.from(new Array(level).keys(), x => { if (x < (level - 1)) { return '' } else { return fobj.title } })];
  tree.push(arr1);
  if (files && files.length > 0) {
    files.forEach(obj => {
      //first line adds the hyperlinks and the second one returns only text
      let arr2 = [...Array.from(new Array(level + 1).keys(), x => { if (x < (level)) { return '' } else { return `=HYPERLINK("${obj.alternateLink}","${obj.title}")` } })];
      //let arr2 = [...Array.from(new Array(level + 1).keys(), x => { if (x < (level)) { return '' } else { return obj.title } })];
      tree.push(arr2);
    });
  }
  subfolders.forEach(obj => {
    level++;
    obj.level = level;
    tree = gff(obj, tree);
  });
  level--;
  return tree;
}
A few things to note:
You'll need to get the folder ID to plug into the f variable.
The original script looped through all the shared drives and wrote each one's tree to a different sheet within your spreadsheet. Since you only seemed to want a single folder's tree, this version just writes to the first sheet in the file.
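One more caveat, added by me rather than taken from the answer above: Drive.Children.list returns paged results, so very large folders may come back truncated. A sketch of draining all pages with pageToken (parameter names are from the Drive v2 advanced service):

// Sketch: collect every child of a folder across all result pages.
const listAllChildren = (folderId) => {
  let items = [];
  let pageToken;
  do {
    const args = { maxResults: 1000 };
    if (pageToken) args.pageToken = pageToken;
    const r = Drive.Children.list(folderId, args);
    items = items.concat(r.items || []);
    pageToken = r.nextPageToken;
  } while (pageToken);
  return items;
};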

How to decrypt AES with Google Apps Script

I am trying to decrypt AES with GAS. The target of decryption is a document file retrieved by Amazon Selling Partner API.
The key, iv, and URL are obtained by the API, and I want to decrypt the data downloaded by accessing the URL with the key and iv.
However, the decrypted text is either empty or garbled.
Can you please tell me what is wrong with the following code? The code uses cCryptoGS, which is a wrapper library for CryptoJS.
const decrypt_test = () => {
  const url = 'https://tortuga-prod-fe.s3-us-west-2.amazonaws.com/%2FNinetyDays/amzn1.tortuga.3.5d4685fe-cdf1-4f37-8dfc-a25b85468e34.T1J5QXLEXAMPLE';
  const response = UrlFetchApp.fetch(url);
  const file = response.getContentText();
  const key = 'xiZ8FGT6pYo49ZwfvAplJxKgO0qW46Morzs5aEXAMPLE';
  const iv = 'aoGh0rhbB3ALlCFKiEXAMPLE';
  const enc_key = cCryptoGS.CryptoJS.enc.Base64.parse(key);
  const enc_iv = cCryptoGS.CryptoJS.enc.Base64.parse(iv);
  const cipherParams = cCryptoGS.CryptoJS.lib.CipherParams.create({
    ciphertext: file //cCryptoGS.CryptoJS.enc.Base64.parse(file)
  });
  console.log(`enc_key_length:${enc_key.words.length}`);
  console.log(`enc_iv_length:${enc_iv.words.length}`);
  const decryptedMessage = cCryptoGS.CryptoJS.AES.decrypt(cipherParams, enc_key, { iv: enc_iv, mode: cCryptoGS.CryptoJS.mode.CBC }).toString();
  console.log(`decryptedMessage:${decryptedMessage}`);
  return decryptedMessage;
};
[output]
2021/06/20 20:04:04 debug enc_key_length:8
2021/06/20 20:04:04 debug enc_iv_length:4
2021/06/20 20:04:04 debug decryptedMessage:bfc095f3ecec221e8585ceb68031078d25112f5f26ea2c1f80470f5f4f19f2e1c2cd94638e8666c3486fa29191b568bcd9e8d5a3bdcbbc05456f0567bb6cdae675fa044f94e560379d16b1d370cd7c4a9c5afbbcf4fde2694ed01c1b7950eaabc65e46c4640d8f0814bfe66e8ae65f7768136ac4615624be25373d665ee8fde82742e26664d7c09c61ac8994dc3052f0f22d5042f0b407d696e3c84a3906350dc60c46001ef7865d0c6594c57c5af22616688e028f52d4f12b538d0580c420fdcb0ee61287d4ee2629cd7d39f739d63e84dd75e948eaffb4383076f0c66997
The following code solved the problem. The key change was fetching the ciphertext as raw bytes with getBlob().getBytes() instead of getContentText() (which decodes the binary data as text and corrupts it), converting those bytes to hex for CryptoJS, and converting the decrypted hex back to bytes before writing the file:
const decrypt_test = () => {
  const url = 'https://tortuga-prod-fe.s3-us-west-2.amazonaws.com/%2FNinetyDays/EXAMPLE';
  let options = {
    'method': 'get',
    'muteHttpExceptions': true,
  };
  const response = UrlFetchApp.fetch(url, options);
  const file = response.getBlob().getBytes();
  const key = 'xiZ8FGT6pYo49ZwfvAplJxKgO0qW46MoEXAMPLE';
  const iv = 'aoGh0rhbB3ALlCFKiuJEXAMPLE';
  const enc_key = cCryptoGS.CryptoJS.enc.Base64.parse(key);
  const enc_iv = cCryptoGS.CryptoJS.enc.Base64.parse(iv);
  const cipherParams = cCryptoGS.CryptoJS.lib.CipherParams.create({
    ciphertext: cCryptoGS.CryptoJS.enc.Hex.parse(hexes(file))
  });
  const decryptedMessage = cCryptoGS.CryptoJS.AES.decrypt(cipherParams, enc_key,
    { iv: enc_iv, mode: cCryptoGS.CryptoJS.mode.CBC }).toString();
  console.log(`decryptedMessage:${decryptedMessage}`);
  const bin = bytes(decryptedMessage);
  const myBlob = Utilities.newBlob(bin, MimeType.TEXT, "decrypted.csv");
  DriveApp.createFile(myBlob);
};
const bytes = (hexstr) => {
  const ary = [];
  for (var i = 0; i < hexstr.length; i += 2) {
    ary.push(parseInt(hexstr.substr(i, 2), 16));
  }
  return ary;
};
const hexes = (ary) => {
  return ary.map((e) => ('00' + (e < 0 ? e += 0x0100 : e).toString(16)).slice(-2)).join('');
};
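For what it's worth, the hex round-trip could likely be avoided by base64-encoding the raw bytes with the built-in Utilities service and letting CryptoJS's Base64 codec parse them. An untested sketch under that assumption:

// Sketch: feed the ciphertext to CryptoJS as base64 instead of hex.
const raw = response.getBlob().getBytes();
const cipherParams = cCryptoGS.CryptoJS.lib.CipherParams.create({
  ciphertext: cCryptoGS.CryptoJS.enc.Base64.parse(Utilities.base64Encode(raw))
});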

adding top 10 to a leveling system leaderboard

So I'm trying to add a top 10 to my leaderboard. This is the code I'm using for the leveling system and leaderboard, but I don't know how to limit it to the top 10. I need it because when you type the command it just spams the channel, since a lot of people have talked, so please help.
client.on("message", async (message) => {
  if (message.author.bot) return;
  if (message.channel.type === "dm") {
    return;
  }
  const dm = client.users.cache.get(message.author.id);
  if (!db[message.author.id])
    db[message.author.id] = {
      userid: message.author.id,
      xp: 0,
      level: 0,
    };
  db[message.author.id].xp++;
  let userInfo = db[message.author.id];
  if (userInfo.xp > 99) {
    userInfo.level++;
    userInfo.xp = 0;
    dm.send(
      new Discord.MessageEmbed()
        .setTitle(
          `${levelup}Level up${levelup}\n${levelup}Level: ${userInfo.level} ${levelup}`
        )
        .setColor("#E2DF09")
        .setTimestamp()
    );
  }
  if (
    message.content.toLowerCase().startsWith(prefix + "rank") ||
    message.content.toLowerCase().startsWith(prefix + "level")
  ) {
    let userInfo = db[message.author.id];
    let member = message.mentions.members.first();
    let embed = new Discord.MessageEmbed()
      .setColor("#E2DF09")
      .addField("Level", `${info}` + userInfo.level + `${info}`)
      .addField("XP", `${info}` + userInfo.xp + "/100" + `${info}`)
      .setFooter(
        `${message.author.tag}`,
        `${message.author.avatarURL({ dynamic: true })}`
      );
    if (!member) return message.channel.send(embed);
    let memberInfo = db[member.id];
    let embed2 = new Discord.MessageEmbed()
      .setColor("#E2DF09")
      .addField("Level", `${info}` + memberInfo.level + `${info}`)
      .addField("XP", `${info}` + memberInfo.xp + "/100" + `${info}`);
    message.channel.send(embed2);
  } else if (
    message.content.toLowerCase().startsWith(prefix + "leaderboard") ||
    message.content.toLowerCase().startsWith(prefix + "lb")
  ) {
    const embed = new Discord.MessageEmbed()
      .setTitle("Leaderboard")
      .setColor("#E2DF09");
    const c = Object.entries(db).sort((a, b) => b[1].level - a[1].level);
    for (const [key, value] of c) {
      embed.addField(
        `\u200B`,
        `<@${value.userid}>\nLevel: ${value.level} | XP: ${value.xp}`
      );
    }
    message.channel.send(embed);
  }
  fs.writeFile("./db/database.json", JSON.stringify(db), (error) => {
    if (error) console.error(error);
  });
});
And this is what the JSON file looks like (I use the userid for the leaderboard tag):
{"630812692659044352":{"userid":"630812692659044352","xp":31,"level":32}}
This is how the leaderboard looks, but I want to add the numbers next to the usernames, and for 1st, 2nd, and 3rd I want to add a trophy or something, but it's not working out on my side.
Here is something I think would work (I tested it on an online site):
var ranks = {
  'SOMEUSERID': {
    userid: 'SOMEUSERID',
    rank: 0,
    xp: 45
  },
  'ANOTHERUSERID': {
    userid: 'ANOTHERUSERID',
    rank: 0,
    xp: 70
  }
}
//^^EXAMPLE RANKS, DON'T ADD THESE^^
var leaderboard = []
for (const i in ranks) {
  var person = ranks[i];
  leaderboard.push(person)
}
//^^LOOP THROUGH ALL USERS^^
leaderboard.sort((b, a) => ((a.rank + 1) * (a.xp)) - ((b.rank + 1) * (b.xp)))
//^^SORT BY (RANK+1)*XP, in descending order^^
var ranksSorted = leaderboard.map(u => u.rank);
var xpSorted = leaderboard.map(u => u.xp)
var usersSorted = leaderboard.map(u => u.userid)
//ranksSorted, xpSorted, and usersSorted are synced
//...
//top 10 leaderboard within one string
var str = '';
for (let i = 0; i < 10 && i < leaderboard.length; i++) {
  str += usersSorted[i] + '\n' + ranksSorted[i] + ' | ' + xpSorted[i] + '\n'
  //YOU CAN ALSO ACCESS WHICH PLACE THEY ARE IN WITH i+1
}
You don't have to save it in one string; instead of appending to the string, you can add an embed field, like this:
for (let i = 0; i < 10 && i < leaderboard.length; i++) {
  someEmbed.addField(client.users.cache.get(usersSorted[i]).tag, ranksSorted[i] + ' | ' + xpSorted[i])
}
EDIT: (rank+1)*xp wouldn't work well: 1*28, for example, is more than 2*13, so a user with a lower rank but more XP would outrank a higher-ranked one. Use Math.pow() on both sides:
leaderboard.sort((b, a) => (Math.pow((a.rank + 1), 10) * (a.xp)) - (Math.pow((b.rank + 1), 10) * (b.xp)))
//instead of the previous leaderboard.sort
This way, rank 1 with 99 XP sorts below rank 2 with 1 XP.
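Putting the pieces together for the original leveling data, here's a condensed sketch (untested against a live bot): since each level costs 100 XP, sorting by total XP (level * 100 + xp) sidesteps the weighting problem entirely. It assumes the question's db shape and Discord.js v12 embed API, and adds medals for the top three:

// Sketch: top 10 by total XP, numbered, with trophies for the first three.
const medals = ['🥇', '🥈', '🥉'];
const top10 = Object.values(db)
  .sort((a, b) => (b.level * 100 + b.xp) - (a.level * 100 + a.xp))
  .slice(0, 10); // only the first 10 entries, so the command no longer spams
const lbEmbed = new Discord.MessageEmbed().setTitle('Leaderboard').setColor('#E2DF09');
top10.forEach((u, i) => {
  lbEmbed.addField(medals[i] || `#${i + 1}`, `<@${u.userid}>\nLevel: ${u.level} | XP: ${u.xp}`);
});
message.channel.send(lbEmbed);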

Extending script execution time beyond 5 minutes for reddit scraping

I'm attempting to gather all of the posts submitted to a particular subreddit using the code found here: https://www.labnol.org/internet/web-scraping-reddit/28369/ However, the execution limit is reached well before this completes.
I am looking for a way to extend the run time of the script, with it ideally not needing my intervention at all once I click run.
const getThumbnailLink_ = url => {
  if (!/^http/.test(url)) return '';
  return `=IMAGE("${url}")`;
};
const getHyperlink_ = (url, text) => {
  if (!/^http/.test(url)) return '';
  return `=HYPERLINK("${url}", "${text}")`;
};
const writeDataToSheets_ = data => {
  const values = data.map(r => [
    new Date(r.created_utc * 1000),
    r.title,
    getThumbnailLink_(r.thumbnail),
    getHyperlink_(r.url, 'Link'),
    getHyperlink_(r.full_link, 'Comments')
  ]);
  const sheet = SpreadsheetApp.getActiveSheet();
  sheet.getRange(sheet.getLastRow() + 1, 1, values.length, values[0].length).setValues(values);
  SpreadsheetApp.flush();
};
const isRateLimited_ = () => {
  const response = UrlFetchApp.fetch('https://api.pushshift.io/meta');
  const { server_ratelimit_per_minute: limit } = JSON.parse(response);
  return limit < 1;
};
const getAPIEndpoint_ = (subreddit, before = '') => {
  const fields = ['title', 'created_utc', 'url', 'thumbnail', 'full_link'];
  const size = 10000;
  const base = 'https://api.pushshift.io/reddit/search/submission';
  const params = { subreddit, size, fields: fields.join(',') };
  if (before) params.before = before;
  const query = Object.keys(params)
    .map(key => `${key}=${params[key]}`)
    .join('&');
  return `${base}?${query}`;
};
const scrapeReddit = (subreddit = 'AskMen') => {
  let before = '';
  do {
    const apiUrl = getAPIEndpoint_(subreddit, before);
    const response = UrlFetchApp.fetch(apiUrl);
    const { data } = JSON.parse(response);
    const { length } = data;
    before = length > 0 ? String(data[length - 1].created_utc) : '';
    if (length > 0) {
      writeDataToSheets_(data);
    }
  } while (before !== '' && !isRateLimited_());
};
Generally it's better practice to optimize your script so that it doesn't hit the execution-time limit defined by your quota. So in your case, one solution is to reduce the batch size per execution. In the reference you linked, the code fetches 1000 posts per batch; your code fetches 10000.
Try smaller values to see whether the script execution time stays within the quota.
const getAPIEndpoint_ = (subreddit, before = '') => {
  const fields = ['title', 'created_utc', 'url', 'thumbnail', 'full_link'];
  const size = 1000;
  const base = 'https://api.pushshift.io/reddit/search/submission';
  const params = { subreddit, size, fields: fields.join(',') };
  if (before) params.before = before;
  const query = Object.keys(params)
    .map(key => `${key}=${params[key]}`)
    .join('&');
  return `${base}?${query}`;
};
But if you have a business need to exceed your quota, you can upgrade to either Google Workspace Basic, Business, or Enterprise, depending on how much you need to increase your quota and how much you are willing to pay.
See here for more information about different accounts and pricing.
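Another common workaround, sketched here as my own suggestion rather than part of the answer above: checkpoint the before cursor in PropertiesService and re-run the function on a time-based trigger, so each execution stops early and the next one resumes where it left off. scrapeRedditResumable is a hypothetical name; getAPIEndpoint_ and writeDataToSheets_ are the helpers from the question:

// Sketch: resumable scrape; pair it with a time-driven trigger.
const scrapeRedditResumable = (subreddit = 'AskMen') => {
  const props = PropertiesService.getScriptProperties();
  let before = props.getProperty('before') || '';
  const start = Date.now();
  do {
    const response = UrlFetchApp.fetch(getAPIEndpoint_(subreddit, before));
    const { data } = JSON.parse(response);
    if (data.length === 0) {
      props.deleteProperty('before'); // finished; start fresh next time
      return;
    }
    writeDataToSheets_(data);
    before = String(data[data.length - 1].created_utc);
    props.setProperty('before', before); // checkpoint for the next run
  } while (Date.now() - start < 4.5 * 60 * 1000); // stop with headroom before the limit
};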

Export Form responses as csv Google Apps Scripts

Is there is a fast way to programmatically export all responses from a Google Form to a csv? Something like "Export responses to csv" invoked via Scripts.
Right now I'm doing it in a rock art way:
Iterate over the forms I want to export (~75)
Open each form var form = FormApp.openById(formId);
Get responses: var formReponses = form.getResponses(); (from 0 to 700 responses each form)
Iterate over responses and get item responses: var preguntes = formReponses[r].getItemResponses();
For each itemResponse, convert it to csv/json
Export responses to a drive file
This is extremely slow and it also hangs over and over, so I had to export responses in chunks of 50 and save them in separate Drive files. On the next execution (after letting the servers cool down for a while), I run the script again, skipping the number of responses already found in the chunk file.
Additionally, I'm not sure that Google keeps the response order when doing form.getResponses(); (actually I've found that if the form has been modified, the order is not the same).
Is there a better way to do it?
With the help of @JackBrown I've managed to write a Chrome extension to download responses (maybe soon on GitHub). This will wait for each download in the formIds object until it finishes and then start the next one:
'use strict';
function startDownload() {
  const formIds = {
    'Downloads-subfolder-here': {
      'Download-filename-here': '1-cx-aSAMrTK0IHsQkE... {form-id here}',
      'Another-filename-here': '...-dnqdpnEso {form-id here}',
      // ...
    },
    'Another-subfolder-here': {
      'Download-filename-here': '1-cx-aSAMrTK0IHsQkE... {form-id here}',
      'Another-filename-here': '...-dnqdpnEso {form-id here}',
      // ...
    },
  };
  const destFolders = Object.keys(formIds);
  const downloads = [];
  for (let t = 0, tl = destFolders.length; t < tl; t += 1) {
    const destFolder = destFolders[t];
    const forms = Object.keys(formIds[destFolder]);
    for (let f = 0, fl = forms.length; f < fl; f += 1) {
      const formName = forms[f];
      downloads.push({
        destFolder,
        formName,
        url: `https://docs.google.com/forms/d/${formIds[destFolder][formName]}/downloadresponses?tz_offset=-18000000`,
        filename: `myfolder/${destFolder}/${formName.replace(/\//g, '_')}.csv`,
      });
    }
  }
  const event = new Event('finishedDownload');
  const eventInterrupt = new Event('interruptedDownload');
  let currId;
  chrome.downloads.onChanged.addListener((downloadDelta) => {
    if (downloadDelta.id === currId) {
      if (downloadDelta.state && downloadDelta.state.current === 'complete') {
        document.dispatchEvent(event);
      } else if (downloadDelta.state && downloadDelta.state.current === 'interrupted') {
        console.log(downloadDelta);
        document.dispatchEvent(eventInterrupt);
      }
    }
  });
  downloads.reduce((promise, actual) => {
    return promise.then((last) => (last ? new Promise((resolve) => {
      const { url, filename, destFolder, formName } = actual;
      function listener() {
        document.removeEventListener('finishedDownload', listener);
        document.removeEventListener('interruptedDownload', interrupt);
        resolve(true);
      }
      function interrupt() {
        document.removeEventListener('finishedDownload', listener);
        document.removeEventListener('interruptedDownload', interrupt);
        resolve(false);
      }
      console.log(`Processing ${destFolder}, ${formName}: ${url}`);
      document.addEventListener('finishedDownload', listener);
      document.addEventListener('interruptedDownload', interrupt);
      chrome.downloads.download({ url, filename }, (downloadId) => {
        currId = downloadId;
        if (!downloadId) {
          console.log('Error downloading...');
          console.log(chrome.runtime.lastError);
          resolve(false);
        }
      });
    }) : Promise.resolve(false)));
  }, Promise.resolve(true));
}
chrome.browserAction.onClicked.addListener((/*tab*/) => startDownload());
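As an aside: if each form has a linked response spreadsheet, a much faster server-side route may be to export that sheet directly as CSV instead of walking every response. An untested Apps Script sketch (exportFormCsv is a hypothetical name; it assumes a response destination is set for the form):

// Sketch: export a form's linked response sheet as CSV in one fetch.
function exportFormCsv(formId) {
  const form = FormApp.openById(formId);
  const ssId = form.getDestinationId(); // assumes a destination spreadsheet exists
  const gid = SpreadsheetApp.openById(ssId).getSheets()[0].getSheetId();
  const url = `https://docs.google.com/spreadsheets/d/${ssId}/export?format=csv&gid=${gid}`;
  const csv = UrlFetchApp.fetch(url, {
    headers: { Authorization: 'Bearer ' + ScriptApp.getOAuthToken() }
  }).getContentText();
  DriveApp.createFile(`${form.getTitle().replace(/\//g, '_')}.csv`, csv, MimeType.CSV);
}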