How to write firestore batch in react-redux-firebase - react-redux-firebase

In react-redux-firebase, how do I do a batch write similar to that shown in the firestore docs? (see below)
// Get a new write batch
var batch = db.batch();
// Set the value of 'NYC'
var nycRef = db.collection("cities").doc("NYC");
batch.set(nycRef, {name: "New York City"});
// Update the population of 'SF'
var sfRef = db.collection("cities").doc("SF");
batch.update(sfRef, {"population": 1000000});
// Delete the city 'LA'
var laRef = db.collection("cities").doc("LA");
batch.delete(laRef);
// Commit the batch
batch.commit().then(function () {
// ...
});

Something like this would work. Don't forget that react-redux-firebase and redux-firestore extend the original implementations of firebase and firestore respectively.
const Counter = () => {
const firestore = useFirestore()
const batch = firestore.batch()
const nycRef = firestore.get({collection: 'cities', doc: 'NYC'})
batch.set(nycRef, {name: 'New York City'})
const sfRef = firestore.get({collection: 'cities', doc: 'SF'})
batch.update(sfRef, {population: 10000000})
const laRef = firestore.get({collection: 'cities', doc: 'LA'})
firestore.delete(laRef)
const runBatch = async () => await batch.commit()
return <button onClick={runBatch}>Attempt Batch</button>
}

Related

Flutter data.json keeps being overwritten

So I am working with Flutter but each time I Hot Restart the app it overwrites my data.json.
// reference one of the data.json
regulatorAsync(licenseText) async {
Directory dir = await getApplicationDocumentsDirectory();
File file = File('${dir.path}/data1.json');
if (!await file.exists()) {
print("File doesn't exist");
// if it doesn't exist, create it
file = await file.create();
file = await file.writeAsString(await file.readAsString());
}
if (await file.readAsString() == "") {
print("File is empty");
file = await file.writeAsString('{"newuser": true}');
}
var json = jsonDecode(await file.readAsString());
print(json);
var a = regulator(json, licenseText);
return a;
}
//reference 2
onPressed: () async {
// write data to file
var data = await rootBundle.loadString('lib/mainapp/data.json');
var js = jsonDecode(data);
js["newuser"] = false;
var js2 = jsonEncode(js);
// get the path to the document directory.
Directory tempDir = await getTemporaryDirectory();
var appDocPath = tempDir.path;
print(js2);
var file = await File('$appDocPath/data1.json').writeAsString(js2);
print(file.readAsStringSync());
Navigator.of(context).pushReplacementNamed('/AllowPerms');
},
I know for sure there are no other refrences I even changed the names of the files to data1.json. I get back {"newusers": true} while with the onPress it should have been set to false.

How do I recurrsively call multiple URLs using Puppeteer and Headless Chrome?

I am trying to write a program to scan multiple URLs at the same time (parallelizaiton) and I have extracted sitemap and stored it as an array in a Variable as shown below. But i am unable to open using Puppeteer. I am getting the below error:
originalMessage: 'Cannot navigate to invalid URL'
My code below. Can someone please help me out .
const sitemapper = require('#mastixmc/sitemapper');
const SitemapXMLParser = require('sitemap-xml-parser');
const url = 'https://edition.cnn.com/sitemaps/sitemap-section.xml';
/*If sitemapindex (link of xml or gz file) is written in sitemap, the URL will be accessed.
You can optionally specify the number of concurrent accesses and the number of milliseconds after processing and access to resume processing after a delay.
*/
const options = {
delay: 3000,
limit: 50000
};
const sitemapXMLParser = new SitemapXMLParser(url, options);
sitemapXMLParser.fetch().then(result => {
var locs = result.map(value => value.loc)
var locsFiltered = locs.toString().replace("[",'<br>');
const urls = locsFiltered
console.log(locsFiltered)
const puppeteer = require("puppeteer");
async function scrapeProduct(url) {
const urls = locsFiltered
const browser = await puppeteer.launch({
headless: false
});
for (i = 0; i < urls.length; i++) {
const page = await browser.newPage();
const url = urls[i];
const promise = page.waitForNavigation({
waitUntil: "networkidle2"
});
await page.goto(`${url}`);
}};
scrapeProduct();
});
You see invalid URL because you've convert an array into URL string by wrong method.
These line is a better one:
// var locsFiltered = locs.toString().replace("[",'<br>') // This is wrong
// const urls = locsFiltered // So value is invalid
// console.log(locsFiltered)
const urls = locs.map(value => value[0]) // This is better
So to scrape CNN sites, i've added puppeteer-cluster for speed:
const { Cluster } = require('puppeteer-cluster')
const sitemapper = require('#mastixmc/sitemapper')
const SitemapXMLParser = require('sitemap-xml-parser')
const url = 'https://edition.cnn.com/sitemaps/sitemap-section.xml'
async function scrapeProduct(locs) {
const urls = locs.map(value => value[0])
const cluster = await Cluster.launch({
concurrency: Cluster.CONCURRENCY_CONTEXT,
maxConcurrency: 2, // You can set this to any number you like
puppeteerOptions: {
headless: false,
devtools: false,
args: [],
}
})
await cluster.task(async ({ page, data: url }) => {
await page.goto(url, {timeout: 0, waitUntil: 'networkidle2'})
const screen = await page.screenshot()
// Store screenshot, do something else
})
for (i = 0; i < urls.length; i++) {
console.log(urls[i])
await cluster.queue(urls[i])
}
await cluster.idle()
await cluster.close()
}
/******
If sitemapindex (link of xml or gz file) is written in sitemap, the URL will be accessed.
You can optionally specify the number of concurrent accesses and the number of milliseconds after processing and access to resume processing after a delay.
*******/
const options = {
delay: 3000,
limit: 50000
}
const sitemapXMLParser = new SitemapXMLParser(url, options)
sitemapXMLParser.fetch().then(async result => {
var locs = result.map(value => value.loc)
await scrapeProduct(locs)
})

CheerioJS to parse data on script tag

I've been trying to parse the data that is in the script tag using cheerio however It's been difficult for the following reasons.
Can't parse string that is generated into JSON because of html-entities
More Info:
Also what is strange to me is that you have to re-load the content into cheerio a second time to get the text.
Your welcome to fork this replit or copy and paste the code to try it yourself
https://replit.com/#Graciasc/Cheerio-Script-Parse
const cheerio = require('cheerio')
const {decode} = require('html-entities')
const html = `
<body>
<script type="text/javascript"src="/data/common.0e95a19724a68c79df7b.js"></script>
<script>require("dynamic-module-registry").set("from-server-context", JSON.parse("\x7B\x22data\x22\x3A\x7B\x22available\x22\x3Atrue,\x22name\x22\x3A"Gracias"\x7D\x7D"));</script>
</body>
`;
const $ = cheerio.load(html, {
decodeEntities: false,
});
const text = $('body').find('script:not([type="text/javascript"])');
const cheerioText = text.eq(0).html();
//implement a better way to grab the string
const scriptInfo = cheerio.load(text.eq(0).html()).text();
const regex = new RegExp(/^.*?JSON.parse\(((?:(?!\)\);).)*)/);
const testing = regex.exec(scriptInfo)[1];
// real output:
//\x7B\x22data\x22\x3A\x7B\x22available\x22\x3Atrue,\x22name\x22\x3A"Gracias"\x7D\x7D when logged
console.log(testing)
// Not Working
const json = JSON.parse(testing)
const decoding = decode(testing)
// same output as testing
console.log(decoding)
// Not working
console.log('decode', JSON.parse(decoding))
//JSON
{ Data: { available: true, name: 'Gracias' } }
A clean solution is to use JSDOM
repl.it link( https://replit.com/#Graciasc/Cheerio-Script-Parse#index.js)
const { JSDOM } = require('jsdom')
const dom = new JSDOM(`<body>
<script type="text/javascript"src="/data/common.0e95a19724a68c79df7b.js"></script>
<script>require("dynamic-module-registry").set("from-server-context", JSON.parse("\x7B\x22data\x22\x3A\x7B\x22available\x22\x3Atrue,\x22name\x22\x3A"Gracias"\x7D\x7D"));</script>
</body>`)
const serializedDom = dom.serialize()
const regex = new RegExp(/^.*?JSON.parse\("((?:(?!"\)\);).)*)/gm);
const jsonString = regex.exec(serializedDom)[1];
console.log(JSON.parse(jsonString))
// output: { data: { available: true, name: 'Gracias' } }

Add Revit Levels and 2D Minimap to your 3D for local viewer

In our project, we use a local viewer and we do not receive data from a request, but we have it locally.
And getting data from our local files it is impossible to insert it into the panel.
And the call doc.downloadAecModelData() returns null.
See more https://forge.autodesk.com/blog/add-revit-levels-and-2d-minimap-your-3d
Autodesk.Viewing.Initializer(options, async () => {
const config3d = {
useConsolidation: true,
useADP: false,
extensions: [
"Autodesk.Viewing.MarkupsCore",
"MarkupExtension",
"Autodesk.AEC.LevelsExtension",
"Autodesk.AEC.Minimap3DExtension"
]
};
const init_div = document.getElementById("init_div");
const svf_path = "/storage/" + decodeURIComponent(props.search.split("&&")[1]);
const p = "/storage/" + props.search.split("&&")[1];
const viewer = new Autodesk.Viewing.GuiViewer3D(init_div, config3d);
Autodesk.Viewing.endpoint.getItemApi = (endpoint, derivativeUrn, api) => {
return (
"/storage/5bf49c73e2ac26b2756b642ae1b29037/aa1bb3d5baaa229e0c15e674adb7aceec1a0fb061e9c0288d9979f801fb6460d.svf/Resource" +
decodeURIComponent(p.split("Resource")[1])
);
};
const paths = svf_path.split("/");
const [dest, svf_dir] = [paths[2], paths[3]];
const url = http://localhost:3000/api/viewer/dest/${dest}/svf/${svf_dir}/manifest;
const response = await fetch(url);
const manifest = await response.json();
const aec_url = /api/viewer/dest/${dest}/svf/${svf_dir}/aec;
const response_aec = await fetch(aec_url);
const aec = JSON.parse(await response_aec.json());
// console.log(aec);
const viewerDocument = new Autodesk.Viewing.Document(manifest);
const a = await viewerDocument.downloadAecModelData();
console.log(a);
console.log(manifest);
const viewable = viewerDocument.getRoot().getDefaultGeometry();
viewable.data.aec_odel_data = aec;
// console.log(viewable);
await Autodesk.Viewing.Initializer(options, () => {
// const aec = viewable.parent.children[1].data; //.urn//.split("Resource")[1];
// for resize and use viewer outer useEffect
props.set_loc_viewer(viewer);
viewer.start();
viewer.loadDocumentNode(viewerDocument, viewable, options);
});
As i know you can only use doc.downloadAecModelData() after document loaded, like this:
onDocumentLoadSuccess(doc: Autodesk.Viewing.Document) {doc.downloadAecModelData(); }

Forge Viewer: Properties Window

The Properties window does not populate any properties even though the 2D view has properties info for the selected room
Here is the function that loads the model. what am I missing?
function loadModel() {
var initialViewable = viewables[indexViewable];
var svfUrl = lmvDoc.getViewablePath(initialViewable);
var modelOptions = {
sharedPropertyDbPath: lmvDoc.getFullPath(lmvDoc.getRoot().findPropertyDbPath())
};
viewer.loadModel(svfUrl, modelOptions, onLoadModelSuccess, onLoadModelError);
}
One line missing in your code, please try the following instead:
var sharedDbPath = initialViewable.findPropertyDbPath();
sharedDbPath = lmvDoc.getFullPath( sharedDbPath );
var modelOptions = {
sharedPropertyDbPath: sharedDbPath
};
However, you should not need to specify the sharedPropertyDbPath manually now. You can take advantage of the Viewer3D#loadDocumentNode to load the model directly. It will automatically determine the path for you. (started from v7 viewer)
const initialViewable = viewables[0];
viewer.loadDocumentNode( lmvDoc, initialViewable, loadOptions )
.then( onLoadModelSuccess )
.catch( onLoadModelError );