WebSerial getPorts() method returns nothing - google-chrome

I have been trying the Web Serial API.
If I use the requestPort() method, like this:
<button id="connect">Connect</button>
<script>
const connectButton = document.getElementById("connect");
connectButton.addEventListener('click', async () => {
try {
const port = await navigator.serial.requestPort();
console.log(port)
} catch (e) {
console.error(e)
}
});
</script>
That works: the user is prompted to choose which device the site should be allowed to control.
But with the getPorts() method, like this:
<script>
  const connectButton = document.getElementById("connect");
  connectButton.addEventListener('click', async () => {
    try {
      const ports = await navigator.serial.getPorts();
      console.log(ports)
    } catch (e) {
      console.error(e)
    }
  });
</script>
I get nothing back: ports.length == 0.
Could anybody help with this?
Thanks

A user must approve access to the device before it can be accessed. You initiate the request via:
navigator.serial.requestPort()
To get the list of ports that have already been selected/approved by the user from the permission popup, you call:
navigator.serial.getPorts()
You can check out the https://webserial.io source for an example here:
https://github.com/williamkapke/webserial/blob/main/src/stores/connection.js#L41
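For completeness, here is a minimal sketch of the typical flow (the button id and baud rate are illustrative assumptions, not from the question): getPorts() only ever returns ports this origin has already been granted, so the very first visit must go through requestPort() inside a user gesture.
async function openPort(port) {
  // baud rate is an assumption; use whatever your device expects
  await port.open({ baudRate: 9600 });
  console.log('opened', port.getInfo());
}

// On load: reuse any previously approved ports (empty list on a first visit).
navigator.serial.getPorts().then(async (ports) => {
  for (const port of ports) await openPort(port);
});

// On a user gesture: prompt for a new port; once granted, it will show up
// in future getPorts() calls.
document.getElementById('connect').addEventListener('click', async () => {
  try {
    const port = await navigator.serial.requestPort();
    await openPort(port);
  } catch (e) {
    console.error(e); // user dismissed the picker
  }
});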

Related

How to Host HTML on a different server than Heroku

I have my index.html and the necessary .js files on Heroku, and everything works fine. Now, I don't want to send my users to "myappname.herokuapp.com", so I plan to use my own website to host the .html file; but when the user taps "submit" on my HTML form, I want to execute the Heroku Node.js code.
Here is what the HTML looks like:
<script>
  const form = document.querySelector("form");
  form.addEventListener("submit", async (e) => {
    e.preventDefault();
    try {
      displayStatus("processing...");
      const request = new Request("/file-upload", {
        method: "POST",
        body: new FormData(form),
      });
      const res = await fetch(request);
      const resJson = await res.json();
      displayResult(resJson.result);
    } catch (err) {
      displayStatus("an unexpected error");
      console.error(err);
    }
  });

  function displayResult(result) {
    const content = `Your ID: ${result.id}`;
    displayStatus(content);
  }

  function displayStatus(msg) {
    result.textContent = msg; // "result" here resolves to the element with id="result"
  }
</script>
How can I call this "/file-upload" from my HTML hosted at "mywebsite.com/index.html" while the actual Node.js logic runs on "myappname.herokuapp.com"?
I've tried replacing "/file-upload" with "myappname.herokuapp.com/file-upload" but it doesn't work.
Again, the goal is to use what I have on Heroku, but not have users go to "myappname.herokuapp.com"; instead they should go to "mywebsite.com/index.html".
Thank you
Actually, replacing "/file-upload" with "myappname.herokuapp.com/file-upload" did the trick. The only issue is my "const request = new Request" call returning an error every time, even though the Heroku logs show a successful execution of "file-upload".
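That lingering error is consistent with two things worth checking (a hedged sketch follows, assuming the Heroku app is an Express server and the cors npm package is available): without a scheme, "myappname.herokuapp.com/file-upload" is parsed as a relative path, so the URL needs the https:// prefix; and even when the request reaches Heroku, the browser will refuse to hand the response to your script unless the server sends CORS headers for your site's origin.
// Client side: absolute URL, including the scheme.
const request = new Request("https://myappname.herokuapp.com/file-upload", {
  method: "POST",
  body: new FormData(form),
});

// Server side (the Heroku app): let mywebsite.com read the responses.
const express = require("express");
const cors = require("cors");
const app = express();

app.use(cors({ origin: "https://mywebsite.com" }));

app.post("/file-upload", (req, res) => {
  // ... existing upload logic ...
  res.json({ result: { id: "123" } }); // response shape assumed from the client code
});

app.listen(process.env.PORT || 3000);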

How to bypass CSP (Content-Security-Policy) using Puppeteer's API page.addScriptTag?

Scenario:
I launch Chrome in headless mode with Puppeteer, and call page.addScriptTag with a cross-domain JavaScript file. If the site being opened has a CSP set that restricts script tags to the same origin, how can I bypass this using the Puppeteer API?
Use:
await page.setBypassCSP(true)
Documentation
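Note that the CSP bypass takes effect when the page's CSP is initialized, so setBypassCSP should be called before navigating. A minimal sketch (the URLs are placeholders):
const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  await page.setBypassCSP(true); // must happen before goto()
  await page.goto('https://example.com');
  // A cross-origin script can now be injected despite the page's CSP.
  await page.addScriptTag({ url: 'https://cdn.example.org/lib.js' });
  await browser.close();
})();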
This is my first Stack Overflow contribution, so have mercy on me. I found this work-around to get past CSP, here.
The basic idea is that you intercept page requests and use a library like node-fetch to make the request yourself, then strip the CSP header from the response before passing it back to Chrome.
Here's the snippet, which originally came from the GitHub issue tracker.
Replace "example.com" with the website that needs to have CSP disabled.
const fetch = require('node-fetch')

const requestInterceptor = async (request) => {
  try {
    const url = request.url()
    const requestHeaders = request.headers()
    const acceptHeader = requestHeaders.accept || ''

    if (url.includes("example.com") && acceptHeader.includes('text/html')) {
      const cookiesList = await page.cookies(url)
      const cookies = cookiesList.map(cookie => `${cookie.name}=${cookie.value}`).join('; ')
      delete requestHeaders['x-devtools-emulate-network-conditions-client-id']
      if (requestHeaders.Cookie) {
        requestHeaders.cookie = requestHeaders.Cookie
        delete requestHeaders.Cookie
      }
      const theseHeaders = Object.assign({ 'cookie': cookies }, requestHeaders, { 'accept-language': 'en-US,en' })
      const init = {
        body: request.postData(),
        headers: theseHeaders,
        method: request.method(),
        follow: 20,
      }
      const result = await fetch(url, init)

      // Copy every response header except the CSP header.
      const resultHeaders = {}
      result.headers.forEach((value, name) => {
        if (name.toLowerCase() !== 'content-security-policy') {
          resultHeaders[name] = value
        } else {
          console.log('CSP', `omitting CSP`, { originalCSP: value })
        }
      })

      const buffer = await result.buffer()
      await request.respond({
        body: buffer,
        headers: resultHeaders,
        status: result.status,
      })
    } else {
      request.continue();
    }
  } catch (e) {
    console.log("Error while disabling CSP", e);
    request.abort();
  }
}

await page.setRequestInterception(true)
page.on('request', requestInterceptor)
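One caveat not spelled out above: setRequestInterception(true) and the 'request' listener must be registered before the page is navigated, otherwise the initial document request slips through without being intercepted and its CSP header reaches Chrome untouched. (The interceptor also assumes a page variable is in scope for the page.cookies(url) call.)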

Possible to run Headless Chrome/Chromium in a Google Cloud Function?

Is there any way to run Headless Chrome/Chromium in a Google Cloud Function? I understand I can include and run statically compiled binaries in GCF. Can I get a statically compiled version of Chrome that would work for this?
The Node.js 8 runtime for Google Cloud Functions now includes all the necessary OS packages to run Headless Chrome.
Here is a code sample of an HTTP function that returns screenshots:
Main index.js file:
const puppeteer = require('puppeteer');

exports.screenshot = async (req, res) => {
  const url = req.query.url;
  if (!url) {
    return res.send('Please provide URL as GET parameter, for example: ?url=https://example.com');
  }
  const browser = await puppeteer.launch({
    args: ['--no-sandbox']
  });
  const page = await browser.newPage();
  await page.goto(url);
  const imageBuffer = await page.screenshot();
  await browser.close();
  res.set('Content-Type', 'image/png');
  res.send(imageBuffer);
}
and package.json:
{
  "name": "screenshot",
  "version": "0.0.1",
  "dependencies": {
    "puppeteer": "^1.6.2"
  }
}
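(Deployment is outside the scope of this answer, but for reference this can be pushed with the gcloud CLI, e.g. gcloud functions deploy screenshot --runtime nodejs8 --trigger-http; headless Chrome is memory-hungry, so you will likely want to raise the function's memory allocation above the default.)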
I've just deployed a GCF function running headless Chrome. A couple of takeaways:
you have to statically compile Chromium and NSS on Debian 8
you have to patch environment variables to point to NSS before launching Chromium
performance is much worse than what you'd get on AWS Lambda (3+ seconds)
For 1, you should be able to find plenty of instructions online.
For 2, the code that I'm using is the following:
static executablePath() {
  let bin = path.join(__dirname, '..', 'bin', 'chromium');
  let nss = path.join(__dirname, '..', 'bin', 'nss', 'Linux3.16_x86_64_cc_glibc_PTH_64_OPT.OBJ');

  // Prepend the NSS bin/ and lib/ directories to PATH and LD_LIBRARY_PATH
  // so Chromium picks them up when it launches.
  if (process.env.PATH === undefined) {
    process.env.PATH = path.join(nss, 'bin');
  } else if (process.env.PATH.indexOf(nss) === -1) {
    process.env.PATH = [path.join(nss, 'bin'), process.env.PATH].join(':');
  }
  if (process.env.LD_LIBRARY_PATH === undefined) {
    process.env.LD_LIBRARY_PATH = path.join(nss, 'lib');
  } else if (process.env.LD_LIBRARY_PATH.indexOf(nss) === -1) {
    process.env.LD_LIBRARY_PATH = [path.join(nss, 'lib'), process.env.LD_LIBRARY_PATH].join(':');
  }

  // /tmp is the writable location in GCF: symlink the binary there and make
  // it executable. Note this returns a string when the symlink already
  // exists and a Promise otherwise, so callers should await the result.
  if (fs.existsSync('/tmp/chromium') === true) {
    return '/tmp/chromium';
  }
  return new Promise(
    (resolve, reject) => {
      try {
        fs.chmod(bin, '0755', () => {
          fs.symlinkSync(bin, '/tmp/chromium');
          return resolve('/tmp/chromium');
        });
      } catch (error) {
        return reject(error);
      }
    }
  );
}
You also need to pass a few required arguments when starting Chrome (wired together in the sketch after this list), namely:
--disable-dev-shm-usage
--disable-setuid-sandbox
--no-first-run
--no-sandbox
--no-zygote
--single-process
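A minimal sketch of a launch call combining these flags with the executablePath() helper above (hypothetical glue code, assuming Puppeteer is the launcher; the helper may return either a string or a Promise, so it is awaited):
const puppeteer = require('puppeteer');

async function launchChrome() {
  return puppeteer.launch({
    executablePath: await executablePath(),
    args: [
      '--disable-dev-shm-usage',
      '--disable-setuid-sandbox',
      '--no-first-run',
      '--no-sandbox',
      '--no-zygote',
      '--single-process',
    ],
  });
}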
I hope this helps.
As mentioned in the comments, work is being done on a possible solution to running a headless browser in a cloud function. A directly applicable discussion, "headless chrome & aws lambda", can be read on Google Groups.
The question at hand was: can you run headless Chrome or Chromium in Firebase Cloud Functions? The answer is NO, since the Node.js project will not have access to any Chrome/Chromium executables and will therefore fail (trust me, I've tried!).
A better solution is to use the phantom npm package, which uses PhantomJS under the hood:
https://www.npmjs.com/package/phantom
Docs and info can be found here:
http://amirraminfar.com/phantomjs-node/#/
or
https://github.com/amir20/phantomjs-node
The site I was trying to crawl had implemented anti-scraping measures, so the trick is to wait for the page to load by searching for an expected string or a regex match.
Here is a TypeScript snippet to make the HTTP or HTTPS call (run inside an async function):
const phantom = require('phantom');
const instance: any = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
const page: any = await instance.createPage();
const status = await page.open('https://somewebsite.co.uk/');
const content = await page.property('content');
The same again in JavaScript (also inside an async function; bare yield only works inside a generator, so await is the direct equivalent here):
const phantom = require('phantom');
const instance = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
const page = await instance.createPage();
const status = await page.open('https://somewebsite.co.uk/');
const content = await page.property('content');
That's the easy bit! If it's a static page, you're pretty much done and can parse the HTML with something like the cheerio npm package: https://github.com/cheeriojs/cheerio - an implementation of core jQuery designed for servers!
However, if it is a dynamically loading page (i.e. lazy loading, or even anti-scraping methods), you will need to wait for the page to update by looping, calling the page.property('content') method, and running a text search or regex to see if your page has finished loading.
I have created a generic asynchronous function that returns the page content (as a string) on success and throws an exception on failure or timeout. It takes as parameters the page, text (a string to search for that indicates success), error (a string that indicates failure, or null to not check for errors), and timeout (a number, self-explanatory):
TypeScript:
import { isNullOrUndefined } from 'util'; // Node's util module

async function waitForPageToLoadStr(page: any, text: string, error: string, timeout: number): Promise<string> {
  const maxTime = timeout ? (new Date()).getTime() + timeout : null;
  let html: string = '';
  html = await page.property('content');

  async function loop(): Promise<string> {
    async function checkSuccess(): Promise<boolean> {
      html = await page.property('content');
      if (!isNullOrUndefined(error) && html.includes(error)) {
        throw new Error(`Error string found: ${error}`);
      }
      if (maxTime && (new Date()).getTime() >= maxTime) {
        throw new Error(`Timed out waiting for string: ${text}`);
      }
      return html.includes(text);
    }
    if (await checkSuccess()) {
      return html;
    } else {
      return loop();
    }
  }

  return await loop();
}
JavaScript:
// (This is the TypeScript compiler's output: __awaiter is the helper
// emitted by tsc, and isNullOrUndefined comes from Node's util module.)
function waitForPageToLoadStr(page, text, error, timeout) {
  return __awaiter(this, void 0, void 0, function* () {
    const maxTime = timeout ? (new Date()).getTime() + timeout : null;
    let html = '';
    html = yield page.property('content');
    function loop() {
      return __awaiter(this, void 0, void 0, function* () {
        function checkSuccess() {
          return __awaiter(this, void 0, void 0, function* () {
            html = yield page.property('content');
            if (!isNullOrUndefined(error) && html.includes(error)) {
              throw new Error(`Error string found: ${error}`);
            }
            if (maxTime && (new Date()).getTime() >= maxTime) {
              throw new Error(`Timed out waiting for string: ${text}`);
            }
            return html.includes(text);
          });
        }
        if (yield checkSuccess()) {
          return html;
        } else {
          return loop();
        }
      });
    }
    return yield loop();
  });
}
I have personally used this function like this:
TypeScript:
try {
  const phantom = require('phantom');
  const instance: any = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
  const page: any = await instance.createPage();
  const status = await page.open('https://somewebsite.co.uk/');
  await waitForPageToLoadStr(page, '<div>Welcome to somewebsite</div>', '<h1>Website under maintenance, try again later</h1>', 1000);
} catch (error) {
  console.error(error);
}
JavaScript:
try {
  const phantom = require('phantom');
  const instance = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
  const page = await instance.createPage();
  await page.open('https://vehicleenquiry.service.gov.uk/');
  await waitForPageToLoadStr(page, '<div>Welcome to somewebsite</div>', '<h1>Website under maintenance, try again later</h1>', 1000);
} catch (error) {
  console.error(error);
}
Happy crawling!

Login Service implementation in AngularJS not working

This is my controller, which calls the login service:
mod.controller("loginCtrl",function($scope,loginService,$http)
{
$scope.Userlogin = function()
{
var User = {
userid :$scope.uname,
pass:$scope.pass
};
var res = UserloginService(User);
console.log(res);
alert("login_succ");
}
});
And this is the login service code, which takes the User variable and checks the username & password:
mod.service("loginService",function($http,$q) {
UserloginService = function(User) {
var deffered = $q.defer();
$http({
method:'POST',
url:'http://localhost:8080/WebApplication4_1/login.htm',
data:User
}).then(function(data) {
deffered.resolve(data);
}).error(function(status) {
deffered.reject({
status:status
});
});
return deffered.promise;
// var response = $http({
//
// method:"post",
// url:"http://localhost:8080/WebApplication4_1/login.htm",
// data:JSON.stringify(User),
// dataType:"json"
// });
// return "Name";
}
});
I have created a REST API using Spring which, upon being passed JSON, returns the username and password as JSON.
The console shows me this error for Angular:
You need to enable CORS for your application; for guidance see this link:
https://htet101.wordpress.com/2014/01/22/cors-with-angularjs-and-spring-rest/
I prefer to use a Factory to do what you're trying to do, which would be something like this:
MyApp.factory('MyService', ["$http", function($http) {
  var urlBase = "http://localhost:3000";
  return {
    getRecent: function(numberOfItems) {
      return $http.get(urlBase + "/things/recent?limit=" + numberOfItems);
    },
    getSomethingElse: function(url) {
      return $http.get(urlBase + "/other/things")
    },
    search: function(searchTerms) {
      return $http.get(urlBase + "/search?q=" + searchTerms);
    }
  }
}]);
And then in your controller you can inject MyService and use it in this way:
MyService.getRecent(10).then(function(res) {
  $scope.things = res.data;
});
This is a great way to handle it: the .then lives in your controller, so you can control the state of the UI while the data is loading if you'd like, like this:
// initialize the loading var, set to false
$scope.loading = false;

// create a reusable update function; inside, use the promise returned
// by the ajax call, which runs inside the Factory
$scope.updateList = function() {
  $scope.loading = true;
  MyService.getRecent(10).then(function(res) {
    $scope.loading = false;
    $scope.things = res.data;
  });
};
$scope.updateList();
The error in the console shows two issues with your code:
CORS is not enabled in your API. To fix this you need to enable CORS by adding the Access-Control-Allow-Origin header in your REST API (in Spring this can be done with the @CrossOrigin annotation).
An unhandled rejection error, because the '.error()' method you are using to handle errors is deprecated.
'Promise.error()' is deprecated according to this and this commit in the AngularJS GitHub repo.
Hence you need to change the way you are handling errors, as shown below:
$http().then(successCallback, errorCallback);

function successCallback(res) {
  return res;
}

function errorCallback(err) {
  return err;
}
One more thing in your code that can be avoided: you define a new promise and resolve it using $q methods, which is not required. $http itself returns a promise by default, so you need not wrap it in a deferred to use it as a promise. You can chain $http(...).then() directly.
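A hedged sketch of the service rewritten without the deferred (the URL is kept from the question; the method name login is illustrative):
mod.service("loginService", function($http) {
  // Return the $http promise directly; no $q.defer() needed.
  this.login = function(User) {
    return $http({
      method: 'POST',
      url: 'http://localhost:8080/WebApplication4_1/login.htm',
      data: User
    });
  };
});

// In the controller, call the injected service instead of a global:
$scope.Userlogin = function() {
  var User = { userid: $scope.uname, pass: $scope.pass };
  loginService.login(User).then(function(response) {
    console.log(response.data);
    alert("login_succ");
  }, function(err) {
    console.error(err);
  });
};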

WebRTC: Connecting multiple listeners to one client, one at a time

I have been trying to get WebRTC to function with a broadcaster and multiple listeners, but am stuck when it comes to transferring descriptions and candidates via signalling (with Node.js & socket.io).
I can get the process working between two browsers with a simple Node.js socket app which simply broadcasts the descriptions and candidates to the other already-connected clients, but when I attempt to store a description and connect with a newly opened browser, nothing happens.
What I basically need to understand is: what do I need to provide to one browser in order for it to begin communicating with another? The project I am working on requires the ability for listeners to join rooms, authenticate, and begin listening to whatever media is being sent.
Below is my client side code:
var audioContext = new webkitAudioContext()
var client = null
var configuration = {
  'iceServers': [{
    'url': 'stun:stun.example.org'
  }]
}

$(function () {
  window.RTCPeerConnection = window.RTCPeerConnection || window.webkitRTCPeerConnection || window.mozRTCPeerConnection
  client = new RTCPeerConnection(configuration, { optional: [{ RtpDataChannels: true }] })

  client.onnegotiationneeded = function () {
    console.log('Negotiation needed')
    createOffer()
  }

  client.onicecandidate = function (event) {
    console.log('onicecandidate')
    socket.emit('candidate', JSON.stringify({ 'candidate': event.candidate }))
  }

  client.onaddstream = function (event) {
    console.log('onaddstream')
    $('#player').attr('src', URL.createObjectURL(event.stream))
    player.play()
  }

  socket.on('candidate', function (event) {
    candidate(event)
  })

  socket.on('description', function (message) {
    if (!client) { return }
    client.setRemoteDescription(new RTCSessionDescription(message.sdp), function () {
      if (client.remoteDescription.type == 'offer') {
        client.createAnswer(function (description) {
          client.setLocalDescription(description, function () {
            socket.emit('description', JSON.stringify({ 'sdp': client.localDescription }))
          })
        }, function (err) {
          console.log('error: ' + err)
        })
      }
    }, function (err) {
      console.log('error: ' + err)
    })
  })

  addStream()
})

function createOffer () {
  if (!client) { return }
  client.createOffer(function (description) {
    console.log(description)
    client.setLocalDescription(description, function () {
      socket.emit('description', JSON.stringify({ 'sdp': client.localDescription }))
      console.log('set local description')
    })
  })
}

function candidate (message) {
  if (message.candidate) {
    console.log('candidate')
    client.addIceCandidate(new RTCIceCandidate(message.candidate))
  }
}

function addStream () {
  navigator.webkitGetUserMedia({ audio: true, video: false }, function (stream) {
    client.addStream(stream)
  })
}
And the signalling part of my server as it currently stands:
io.on 'connection', (socket) ->
  socket.on 'description', (data) ->
    parsed = JSON.parse data
    socket.broadcast.emit 'description', parsed
  socket.on 'candidate', (candidate) ->
    parsed = JSON.parse candidate
    socket.broadcast.emit 'candidate', parsed
I'd appreciate any insight into this. Thanks.
The "PeerConnection" as the name indicates can be used with only one other peer. You cannot cache the offer SDP generated by one PeerConnection instance to use it with more than one other peers.
In your case, you must create a PeerConnection for each browser that you want to send/receive audio and video from and then exchange the corresponding SDP offer and answers with those browsers via your signaling mechanism.
Please feel free to go through some of the links I have mentioned here to understand how WebRTC works.
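A hedged sketch of what that looks like on the broadcaster side, assuming the signalling server relays messages to a specific socket id instead of broadcasting (the event names and the to/from fields are illustrative, not from the original code; the legacy callback API is kept to match the question):
var peers = {} // one RTCPeerConnection per listener, keyed by socket id

socket.on('listener-joined', function (message) {
  var pc = new RTCPeerConnection(configuration)
  peers[message.from] = pc

  pc.onicecandidate = function (event) {
    if (event.candidate) {
      socket.emit('candidate', JSON.stringify({ 'to': message.from, 'candidate': event.candidate }))
    }
  }

  pc.addStream(localStream) // the broadcaster's captured audio stream

  pc.createOffer(function (description) {
    pc.setLocalDescription(description, function () {
      socket.emit('description', JSON.stringify({ 'to': message.from, 'sdp': pc.localDescription }))
    })
  })
})

socket.on('description', function (message) {
  // an answer coming back from one particular listener
  peers[message.from].setRemoteDescription(new RTCSessionDescription(message.sdp))
})

socket.on('candidate', function (message) {
  if (message.candidate) {
    peers[message.from].addIceCandidate(new RTCIceCandidate(message.candidate))
  }
})
On the server, socket.broadcast.emit would then be replaced with a targeted emit (e.g. io.to(message.to).emit in socket.io) so that each offer/answer pair only travels between the two peers it belongs to.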