Using the Looker Embed SDK the connect() promise never resolves with an SSO URL - embed

Using the Looker Embed SDK, the connect() promise never resolves when using an SSO URL.
Using the debugger in my dev environment, I can see that at this point in the @looker/chatty code there is a switch on evt.data.action, which is always undefined. The debug statement on the line above fires many times, but I just see:
looker:chatty:host window received +0ms undefined undefined
In the debugger I can see evt.data = {"type":"page:changed","page":{"type":"dashboard","absoluteUrl":"https://company.looker.com:19999/embed/dashboards/104?embed_domain=http://localhost:3100","url":"/embed/dashboards/104?embed_domain=http://localhost:3100"}}
So evt.data.data and evt.data.action are both undefined.
The iframe loads the dashboard fine. I've added localhost:3100 to the embed allow list, and I'm including ?embed_domain=http://localhost:3100&sdk=2 in the target URL when creating the SSO URL.
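For reference, this is roughly how the target URL is assembled before signing (a sketch; the signing step itself is elided, and the key detail is that embed_domain and sdk=2 are part of the URL that gets signed):
const targetUrl = '/embed/dashboards/104' +
  '?embed_domain=' + encodeURIComponent('http://localhost:3100') +
  '&sdk=2';
// targetUrl is then handed to the server-side SSO signing routine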
This is my code:
const [embed, setEmbed] = useState<LookerEmbedDashboard>();
const embedUrl = useLookerSSOEmbed(dashboard);
const embedCtrRef = useCallback(
  (el) => {
    if (el && embedUrl) {
      el.innerHTML = '';
      LookerEmbedSDK.init('https://company.looker.com:19999');
      LookerEmbedSDK.createDashboardWithUrl(embedUrl)
        .appendTo(el)
        .build()
        .connect()
        .then((dashboard) => {
          setEmbed(dashboard);
          console.log('dashboard', dashboard);
        })
        .catch((error) => console.log('error', error));
    }
  },
  [embedUrl]
);
return <EmbedContainer ref={embedCtrRef}></EmbedContainer>;
};
This is the part of the code in @looker/chatty's host.js where it dies. The single case in the switch never hits, since evt.data.action is always undefined:
var windowListener = function (evt) {
  if (!_this.isValidMsg(evt)) {
    // don't reject here, since that breaks the promise resolution chain
    ChattyHost._debug('window received invalid', evt);
    return;
  }
  ChattyHost._debug('window received', evt.data.action, evt.data.data);
  switch (evt.data.action) {
    case client_messages_1.ChattyClientMessages.Syn:
      if (_this._port) {
        // If targetOrigin is set and we receive another Syn, the frame has potentially
        // navigated to another valid webpage and we should re-connect
        if ((_this._targetOrigin && _this._targetOrigin === '*') ||
            _this._targetOrigin === evt.origin) {
          ChattyHost._debug('reconnecting to', evt.origin);
          _this._port.close();
        }
        else {
          ChattyHost._debug('rejected new connection from', evt.origin);
          return;
        }
      }
      _this._port = evt.ports[0];
      _this._port.onmessage = eventListener;
      _this.sendMsg(host_messages_1.ChattyHostMessages.SynAck);
      _this._state = ChattyHostStates.SynAck;
      break;
  }
};
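As a debugging aid (not part of the SDK), a raw message listener on the host page shows what the iframe is actually posting. Chatty handshake messages carry an action field, while plain embed events like page:changed do not, so if only the latter appear, the client inside the iframe never started the handshake:
window.addEventListener('message', (evt) => {
  // Handshake messages from @looker/chatty include evt.data.action;
  // embed events such as page:changed do not.
  console.log('message from', evt.origin, evt.data);
});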

Related

object keys are undefined in if conditional, but inside the if statement I can access it

As the title states, I have a variable which is a JavaScript object, and I'm comparing it with another JS object by stringifying them. The strange part is that the variable is accessible as a whole, so these
if(JSON.stringify(response) == JSON.stringify(lastcmd))
if(JSON.stringify(response.id) == JSON.stringify(lastcmd))
work perfectly fine, but accessing lastcmd's id key causes it to throw:
if(JSON.stringify(response) == JSON.stringify(lastcmd.id))
full code link here
Edit: Here's the JSON
{ "id" : "001", "app": "msgbox", "contents": { "title": "Newpaste", "message": "I'm a edited paste!" } }
Edit2: Here's the code on the post
const { BrowserWindow, app, dialog, ClientRequest } = require("electron");
const axios = require("axios");
const url = require("url");

let win = null;
let lastcmd;

function grabCurrentInstructions(fetchurl) {
  return axios
    .get(fetchurl)
    .then(response => {
      // handle success
      //console.log(response.data);
      return response.data;
    })
    .catch(function(error) {
      // handle error
      console.log(error);
    });
}

function boot() {
  //console.log(process.type);
  win = new BrowserWindow({
    resizable: true,
    show: false,
    frame: false
  });
  win.loadURL(`file://${__dirname}/index.html`);
  //Loop everything in here every 10 seconds
  var requestLoop = setInterval(getLoop, 4000);
  function getLoop() {
    grabCurrentInstructions("https://pastebin.com/raw/i9cYsAt1").then(
      response => {
        //console.log(typeof lastcmd);
        //console.log(typeof response);
        if (JSON.stringify(response.app) == JSON.stringify(lastcmd.app)) {
          console.log(lastcmd.app);
          clearInterval(requestLoop);
          requestLoop = setInterval(getLoop, 4000);
        } else {
          lastcmd = response;
          switch (response.app) {
            case "msgbox":
              dialog.showMessageBox(response.contents);
              //console.log(lastcmd);
              clearInterval(requestLoop);
              requestLoop = setInterval(getLoop, 1000);
          }
        }
      }
    );
  }
}

app.on("ready", boot);
And here's the error:
(node:7036) UnhandledPromiseRejectionWarning: TypeError: Cannot read property 'id' of undefined
at grabCurrentInstructions.then.response (C:\Users\The Meme Machine\Desktop\nodejsprojects\electronrat\index.js:42:64)
at process._tickCallback (internal/process/next_tick.js:68:7)
Thanks to user str I saw that my lastcmd was undefined when the comparison ran the first time; this would throw and thereby loop the same error over and over. By adding
grabCurrentInstructions("https://pastebin.com/raw/i9cYsAt1").then(
  response => {
    lastcmd = response;
  }
);
below this line
win.loadURL(`file://${__dirname}/index.html`);
I made sure that the last command sent while the app was offline wouldn't be executed on launch, fixing my problem at the same time!
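An alternative sketch (same idea, but guarding inside the loop instead of seeding lastcmd at boot; the response shape is assumed to match the JSON above): the first poll just records the response and returns, so lastcmd.app is never read while lastcmd is undefined.
function getLoop() {
  grabCurrentInstructions("https://pastebin.com/raw/i9cYsAt1").then(response => {
    if (!lastcmd) {
      // first poll: nothing to compare against yet, so just record it
      lastcmd = response;
      return;
    }
    // ... existing comparison and switch logic ...
  });
}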

Get list of blocked requests in Puppeteer

I am trying to get a list of requests that are blocked by the browser (HTTP on HTTPS sites). I tried
page.on('requestfailed', request => {
  console.log(request.url());
  console.log('failed');
});
but it did not provide the requests. The only requests I saw were those of URLs that do not exist anymore. The blocked (HTTP) requests also do not trigger the request event.
page.on('request', request => {
  console.log(request.url());
});
Is there another event I can listen to?
EDIT:
I can see that something failed when I use
page._client.on('Network.loadingFailed', async event => {
  const request = await page._networkManager._requestIdToRequest.get(event.requestId);
  console.log(event);
  console.log(request);
});
but the request var is undefined. So I don't know which request failed.
I found out how to solve this:
const requestToBySend = {}; // maps requestId -> request URL

page._client.on('Network.loadingFailed', async event => {
  if (requestToBySend[event.requestId] !== undefined) {
    let reason = '';
    if (event.blockedReason !== undefined) {
      reason = event.blockedReason;
    } else {
      reason = event.errorText;
    }
    console.log('blocked: ' + requestToBySend[event.requestId] + '; reason: ' + reason);
  }
});

page._client.on('Network.requestWillBeSent', async event => {
  requestToBySend[event.requestId] = event.request.url;
});
The requestId is known from the Network.requestWillBeSent event; I just store those IDs so I can look the URLs up when I need them.
The Chrome events I use can be found here: https://chromedevtools.github.io/devtools-protocol/tot/Network#event-loadingFailed
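Note that page._client is a private API and may change between Puppeteer versions. The same idea works against a documented CDP session (createCDPSession and Network.enable are public API; run this inside an async function):
const client = await page.target().createCDPSession();
await client.send('Network.enable');

const urlsById = {};
client.on('Network.requestWillBeSent', event => {
  urlsById[event.requestId] = event.request.url;
});
client.on('Network.loadingFailed', event => {
  const reason = event.blockedReason || event.errorText;
  console.log('blocked:', urlsById[event.requestId], 'reason:', reason);
});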

Redux saga: yield put not working inside nested callback

const { payload: { loginType, email, password, notification, self } } = action;
console.log("--TRY--");
Firebase.login(loginType, { email, password })
  .catch(function(result) {
    const message =
      result && result.message ? result.message : 'Sorry Some error occurs';
    notification('error', message);
    self.setState({
      confirmLoading: false
    });
    isError = true;
  })
  .then(function(result) {
    if (isError) {
      return;
    }
    if (!result || result.message) {
      const message =
        result && result.message
          ? result.message
          : 'Sorry Some error occurs';
      notification('error', message);
      self.setState({
        confirmLoading: false
      });
    } else {
      self.setState({
        visible: false,
        confirmLoading: false
      });
      console.log("--RIGHT BEFORE I CHECK AUTH STATE--");
      // the following does NOT fire
      firebaseAuth().onAuthStateChanged(function*(user) {
        console.log("THE GENERATOR RUNS");
        if (user) {
          console.log(user);
          yield put({
            type: actions.LOGIN_SUCCESS,
            token: 'secret token',
            profile: 'Profile'
          });
          yield put(push('/dashboard'));
        }
        else {
          yield put({ type: actions.LOGIN_ERROR });
        }
      });
    }
  });
});
Hi. I'm currently working with redux-saga for the first time. I've been trying to get yield put to fire in the callback of the firebaseAuth().onAuthStateChanged listener. The yield keyword won't work in a function that is not an ES6 generator, so I added an asterisk to the callback, but now it won't execute at all. Would really appreciate any advice on the matter.
As you noticed, redux-saga effects can only be used within a generator function, and you cannot use a generator function as a regular callback: calling a generator function does not run its body, it just returns a generator object, which is why your callback never executes.
The right way to approach this is to use an eventChannel: it lets you connect your saga to a source of events external to the redux ecosystem.
First create your eventChannel using the provided factory function: it hands you an emit function that you can use to emit events; then consume these events using the take effect.
import { eventChannel } from 'redux-saga';
import { cancelled, take } from 'redux-saga/effects';

// first create your eventChannel
const authEventsChannel = eventChannel(emit => {
  const unsubscribe = firebaseAuth().onAuthStateChanged(user => {
    emit({ user });
  });
  // return a function that can be used to unregister listeners when the saga is cancelled
  return unsubscribe;
});

// then monitor those events in your saga (the loop must itself live inside a generator)
function* watchAuthState() {
  try {
    while (true) {
      const { user } = yield take(authEventsChannel);
      // handle auth state
    }
  } finally {
    // unregister listener if the saga was cancelled
    if (yield cancelled()) authEventsChannel.close();
  }
}
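To actually run that watcher, fork it from a root saga (a sketch; the saga names are illustrative):
import { fork } from 'redux-saga/effects';

function* rootSaga() {
  // start the auth watcher alongside the rest of the app
  yield fork(watchAuthState);
}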

Possible to run Headless Chrome/Chromium in a Google Cloud Function?

Is there any way to run Headless Chrome/Chromium in a Google Cloud Function? I understand I can include and run statically compiled binaries in GCF. Can I get a statically compiled version of Chrome that would work for this?
The Node.js 8 runtime for Google Cloud Functions now includes all the necessary OS packages to run Headless Chrome.
Here is a code sample of an HTTP function that returns screenshots:
Main index.js file:
const puppeteer = require('puppeteer');

exports.screenshot = async (req, res) => {
  const url = req.query.url;
  if (!url) {
    return res.send('Please provide URL as GET parameter, for example: ?url=https://example.com');
  }
  const browser = await puppeteer.launch({
    args: ['--no-sandbox']
  });
  const page = await browser.newPage();
  await page.goto(url);
  const imageBuffer = await page.screenshot();
  await browser.close();
  res.set('Content-Type', 'image/png');
  res.send(imageBuffer);
}
and package.json
{
  "name": "screenshot",
  "version": "0.0.1",
  "dependencies": {
    "puppeteer": "^1.6.2"
  }
}
I've just deployed a GCF function running headless Chrome. A couple of takeaways:
you have to statically compile Chromium and NSS on Debian 8
you have to patch environment variables to point to NSS before launching Chromium
performance is much worse than what you'd get on AWS Lambda (3+ seconds)
For 1, you should be able to find plenty of instructions online.
For 2, the code that I'm using is the following:
static executablePath() {
  let bin = path.join(__dirname, '..', 'bin', 'chromium');
  let nss = path.join(__dirname, '..', 'bin', 'nss', 'Linux3.16_x86_64_cc_glibc_PTH_64_OPT.OBJ');
  if (process.env.PATH === undefined) {
    process.env.PATH = path.join(nss, 'bin');
  } else if (process.env.PATH.indexOf(nss) === -1) {
    process.env.PATH = [path.join(nss, 'bin'), process.env.PATH].join(':');
  }
  if (process.env.LD_LIBRARY_PATH === undefined) {
    process.env.LD_LIBRARY_PATH = path.join(nss, 'lib');
  } else if (process.env.LD_LIBRARY_PATH.indexOf(nss) === -1) {
    process.env.LD_LIBRARY_PATH = [path.join(nss, 'lib'), process.env.LD_LIBRARY_PATH].join(':');
  }
  if (fs.existsSync('/tmp/chromium') === true) {
    return '/tmp/chromium';
  }
  return new Promise(
    (resolve, reject) => {
      try {
        fs.chmod(bin, '0755', () => {
          fs.symlinkSync(bin, '/tmp/chromium');
          return resolve('/tmp/chromium');
        });
      } catch (error) {
        return reject(error);
      }
    }
  );
}
You also need to pass a few required arguments when starting Chrome (see the launch sketch after this list), namely:
--disable-dev-shm-usage
--disable-setuid-sandbox
--no-first-run
--no-sandbox
--no-zygote
--single-process
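Putting those flags together, a launch call might look like this (a sketch using Puppeteer inside an async function; the executablePath helper is the one shown above):
const puppeteer = require('puppeteer');

const browser = await puppeteer.launch({
  executablePath: await executablePath(), // resolves to /tmp/chromium above
  args: [
    '--disable-dev-shm-usage',
    '--disable-setuid-sandbox',
    '--no-first-run',
    '--no-sandbox',
    '--no-zygote',
    '--single-process'
  ]
});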
I hope this helps.
As mentioned in the comment, work is being done on a possible solution to running a headless browser in a cloud function. A directly applicable discussion, "headless chrome & aws lambda", can be read on Google Groups.
The question at hand was: can you run headless Chrome or Chromium in Firebase Cloud Functions? The answer is NO, since the Node.js project will not have access to any Chrome/Chromium executables and will therefore fail. (Trust me, I've tried!)
A better solution is to use the Phantom npm package, which uses PhantomJS under the hood:
https://www.npmjs.com/package/phantom
Docs and info can be found here:
http://amirraminfar.com/phantomjs-node/#/
or
https://github.com/amir20/phantomjs-node
The site I was trying to crawl had implemented anti-scraping measures, so the trick is to wait for the page to load by searching for an expected string or a regex match.
Here is a TypeScript snippet to make the HTTP or HTTPS call:
const phantom = require('phantom');
const instance: any = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
const page: any = await instance.createPage();
const status = await page.open('https://somewebsite.co.uk/');
const content = await page.property('content');
The same again in JavaScript (wrapped in an async function so await is legal; a bare yield only works inside a generator):
(async () => {
  const phantom = require('phantom');
  const instance = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
  const page = await instance.createPage();
  const status = await page.open('https://somewebsite.co.uk/');
  const content = await page.property('content');
})();
That's the easy bit! If it's a static page, you're pretty much done and can parse the HTML with something like the cheerio npm package: https://github.com/cheeriojs/cheerio - an implementation of core jQuery designed for servers!
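For example, a minimal cheerio sketch over the content string fetched above (the selector is illustrative):
const cheerio = require('cheerio');
const $ = cheerio.load(content);
console.log($('title').text()); // print the page title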
However, if it is a dynamically loading page (lazy loading, or even anti-scraping measures), you will need to wait for the page to update by looping over the page.property('content') method and running a text search or regex to see whether the page has finished loading.
I have created a generic asynchronous function that returns the page content (as a string) on success and throws an exception on failure or timeout. It takes as parameters the page, text (a string whose presence indicates success), error (a string indicating failure, or null to skip the error check), and timeout (a number, self-explanatory):
TypeScript:
import { isNullOrUndefined } from 'util'; // from Node's util module

async function waitForPageToLoadStr(page: any, text: string, error: string, timeout: number): Promise<string> {
  const maxTime = timeout ? (new Date()).getTime() + timeout : null;
  let html: string = '';
  html = await page.property('content');
  async function loop(): Promise<string> {
    async function checkSuccess(): Promise<boolean> {
      html = await page.property('content');
      if (!isNullOrUndefined(error) && html.includes(error)) {
        throw new Error(`Error string found: ${error}`);
      }
      if (maxTime && (new Date()).getTime() >= maxTime) {
        throw new Error(`Timed out waiting for string: ${text}`);
      }
      return html.includes(text);
    }
    if (await checkSuccess()) {
      return html;
    } else {
      return loop();
    }
  }
  return await loop();
}
JavaScript:
// __awaiter is the helper the TypeScript compiler emits for async functions
// (defined at the top of the transpiled file, or imported from tslib);
// isNullOrUndefined comes from Node's util module.
function waitForPageToLoadStr(page, text, error, timeout) {
  return __awaiter(this, void 0, void 0, function* () {
    const maxTime = timeout ? (new Date()).getTime() + timeout : null;
    let html = '';
    html = yield page.property('content');
    function loop() {
      return __awaiter(this, void 0, void 0, function* () {
        function checkSuccess() {
          return __awaiter(this, void 0, void 0, function* () {
            html = yield page.property('content');
            if (!isNullOrUndefined(error) && html.includes(error)) {
              throw new Error(`Error string found: ${error}`);
            }
            if (maxTime && (new Date()).getTime() >= maxTime) {
              throw new Error(`Timed out waiting for string: ${text}`);
            }
            return html.includes(text);
          });
        }
        if (yield checkSuccess()) {
          return html;
        }
        else {
          return loop();
        }
      });
    }
    return yield loop();
  });
}
I have personally used this function like this:
TypeScript:
try {
  const phantom = require('phantom');
  const instance: any = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
  const page: any = await instance.createPage();
  const status = await page.open('https://somewebsite.co.uk/');
  await waitForPageToLoadStr(page, '<div>Welcome to somewebsite</div>', '<h1>Website under maintenance, try again later</h1>', 1000);
} catch (error) {
  console.error(error);
}
JavaScript:
(async () => {
  try {
    const phantom = require('phantom');
    const instance = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
    const page = await instance.createPage();
    await page.open('https://vehicleenquiry.service.gov.uk/');
    await waitForPageToLoadStr(page, '<div>Welcome to somewebsite</div>', '<h1>Website under maintenance, try again later</h1>', 1000);
  } catch (error) {
    console.error(error);
  }
})();
Happy crawling!

WebRTC: Connecting multiple listeners to one client, one at a time

I have been trying to get WebRTC to work with a broadcaster and multiple listeners, but am stuck when it comes to transferring descriptions and candidates via signalling (with Node.js & socket.io).
I can get the process working between two browsers with a simple Node.js socket app which broadcasts the descriptions and candidates to the other already-connected clients, but when I attempt to store a description and connect with a newly opened browser, nothing happens.
What I basically need to understand is what do I need to provide to one browser, in order for it to begin communicating with another? The project I am working on requires the ability for listeners to join rooms, authenticate, and begin listening to whatever media is being sent.
Below is my client side code:
var audioContext = new webkitAudioContext()
var client = null
var configuration =
{
  'iceServers':
  [{
    'url': 'stun:stun.example.org'
  }]
}

$(function ()
{
  window.RTCPeerConnection = window.RTCPeerConnection || window.webkitRTCPeerConnection || window.mozRTCPeerConnection
  client = new RTCPeerConnection(configuration, { optional: [ { RtpDataChannels: true } ] })

  client.onnegotiationneeded = function ()
  {
    console.log('Negotiation needed')
    createOffer()
  }

  client.onicecandidate = function (event)
  {
    console.log('onicecandidate')
    socket.emit('candidate', JSON.stringify({ 'candidate': event.candidate }))
  }

  client.onaddstream = function (event)
  {
    console.log('onaddstream')
    $('#player').attr('src', URL.createObjectURL(event.stream))
    player.play()
  }

  socket.on('candidate', function (event)
  {
    candidate(event)
  })

  socket.on('description', function (message)
  {
    if (!client) { return }
    client.setRemoteDescription(new RTCSessionDescription(message.sdp), function ()
    {
      if (client.remoteDescription.type == 'offer')
        client.createAnswer(function (description)
        {
          client.setLocalDescription(description, function ()
          {
            socket.emit('description', JSON.stringify({ 'sdp': client.localDescription }))
          })
        }, function (err)
        {
          console.log('error: ' + err)
        })
    }, function (err)
    {
      console.log('error: ' + err)
    })
  })

  addStream()
})

function createOffer ()
{
  if (!client) { return; }
  client.createOffer(function (description)
  {
    console.log(description)
    client.setLocalDescription(description, function ()
    {
      socket.emit('description', JSON.stringify({ 'sdp': client.localDescription }))
      console.log('set local description')
    })
  })
}

function candidate (message)
{
  if (message.candidate)
  {
    console.log('candidate')
    client.addIceCandidate(new RTCIceCandidate(message.candidate))
  }
}

function addStream ()
{
  navigator.webkitGetUserMedia({ audio: true, video: false }, function (stream)
  {
    client.addStream(stream)
  })
}
And the signalling part of my server as it currently stands:
io.on 'connection', (socket) ->
  socket.on 'description', (data) ->
    parsed = JSON.parse data
    socket.broadcast.emit 'description', parsed
  socket.on 'candidate', (candidate) ->
    parsed = JSON.parse candidate
    socket.broadcast.emit 'candidate', parsed
I'd appreciate any insight into this. Thanks.
An RTCPeerConnection, as the name indicates, can be used with only one other peer. You cannot cache the offer SDP generated by one PeerConnection instance and reuse it with more than one other peer.
In your case, you must create a PeerConnection for each browser that you want to send/receive audio and video from, and then exchange the corresponding SDP offers and answers with those browsers via your signaling mechanism.
Please feel free to go through some of the links I have mentioned here to understand how WebRTC works.
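In practice that means keeping one connection per remote listener, keyed by something like the socket.io client id. A minimal sketch of the broadcaster side (the peers map, the to field, and the createPeerFor helper are illustrative, not from the question's code):
const peers = {}; // one RTCPeerConnection per remote listener, keyed by socket id

function createPeerFor(peerId, stream) {
  const pc = new RTCPeerConnection(configuration);
  pc.addStream(stream); // the same local stream is added to every connection
  pc.onicecandidate = function (event) {
    // route candidates to this specific peer instead of broadcasting
    socket.emit('candidate', JSON.stringify({ to: peerId, candidate: event.candidate }));
  };
  peers[peerId] = pc;
  return pc;
}
The server then forwards each 'description' and 'candidate' message only to the socket named in to, rather than broadcasting it to everyone.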