How to integrate Lighthouse with TestCafe?

I need to pass the connection argument while calling lighthouse
https://github.com/GoogleChrome/lighthouse/blob/master/lighthouse-core/index.js#L41
async function lighthouse(url, flags = {}, configJSON, connection) {
  // verify the url is valid and that protocol is allowed
  if (url && (!URL.isValid(url) || !URL.isProtocolAllowed(url))) {
    throw new LHError(LHError.errors.INVALID_URL);
  }

  // set logging preferences, assume quiet
  flags.logLevel = flags.logLevel || 'error';
  log.setLevel(flags.logLevel);

  const config = generateConfig(configJSON, flags);
  connection = connection || new ChromeProtocol(flags.port, flags.hostname);

  // kick off a lighthouse run
  return Runner.run(connection, {url, config});
}
And in my TestCafe project my tests look like:
test('Run lighthouse', async t => {
  lighthouse('https://www.youtube.com', {}, {}, ????)
})
I am unable to retrieve the connection of the Chrome instance that TestCafe opened, so Lighthouse spawns a new chromeRunner instead of reusing it.

There is an npm library called testcafe-lighthouse which helps audit web pages using TestCafe. It can also produce a detailed HTML report.
Install the plugin by:
$ yarn add -D testcafe-lighthouse
# or
$ npm install --save-dev testcafe-lighthouse
Audit with default threshold
import { testcafeLighthouseAudit } from 'testcafe-lighthouse';

fixture(`Audit Test`).page('http://localhost:3000/login');

test('user performs lighthouse audit', async t => {
  const currentURL = await t.eval(() => document.documentURI);
  await testcafeLighthouseAudit({
    url: currentURL,
    cdpPort: 9222,
  });
});
Audit with custom thresholds:
import { testcafeLighthouseAudit } from 'testcafe-lighthouse';

fixture(`Audit Test`).page('http://localhost:3000/login');

test('user page performance with specific thresholds', async t => {
  const currentURL = await t.eval(() => document.documentURI);
  await testcafeLighthouseAudit({
    url: currentURL,
    thresholds: {
      performance: 50,
      accessibility: 50,
      'best-practices': 50,
      seo: 50,
      pwa: 50,
    },
    cdpPort: 9222,
  });
});
You need to kick off the test like below:
# headless mode, preferable for CI
npx testcafe chrome:headless:cdpPort=9222 test.js
# non headless mode
npx testcafe chrome:emulation:cdpPort=9222 test.js
I hope it will help your automation journey.

I did something similar: I launch Lighthouse against Google Chrome on a specific port using the CLI
npm run testcafe -- chrome:headless:cdpPort=1234
Then I make the Lighthouse function take the port as an argument:
import lighthouse from 'lighthouse';

export default async function lighthouseAudit(url, browser_port) {
  // run Lighthouse against the Chrome instance listening on the given port
  let result = await lighthouse(url, {
    port: browser_port, // Google Chrome port number
    output: 'json',
    logLevel: 'info',
  });
  return result;
};
Then you can simply run the audit like
test(`Generate Lighthouse Result`, async t => {
  const auditResult = await lighthouseAudit('https://www.youtube.com', 1234);
});
Hopefully it helps.
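Building on that answer, here is a minimal sketch of asserting on the returned scores. It assumes a Lighthouse version that exposes the report under result.lhr (v3+); the import path to the helper above is hypothetical:
import lighthouseAudit from './lighthouseAudit'; // hypothetical path to the helper above

fixture(`Audit`).page('https://www.youtube.com');

test(`Lighthouse performance stays above a threshold`, async t => {
  // 1234 must match the cdpPort passed on the testcafe command line
  const result = await lighthouseAudit('https://www.youtube.com', 1234);

  // each category exposes a 0..1 score in the Lighthouse report
  const performanceScore = result.lhr.categories.performance.score * 100;
  await t.expect(performanceScore).gte(50);
});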

Related

React + Hardhat frontend: how do I call smart contract methods?

I'm using Hardhat locally and have a React frontend up and running, but I can't call the contract methods without errors.
I've tried both ethers.js and web3.
Here's my code and my attempts; please let me know if you see what I'm doing wrong.
I'm trying to interact with contracts that are deployed in the local Hardhat environment through web3, and I'm unable to get the data back from the contract. Here's the info:
I have:
var list = await contract.methods.getList();
console.log("list ", list );
which gets me
list {arguments: Array(0), call: ƒ, send: ƒ, encodeABI: ƒ, estimateGas: ƒ, …}
When I do
var list = await contract.methods.getList().call();
console.log("list ", list );
I get this error in the browser:
Returned values aren't valid, did it run Out of Gas? You might also see this error if you are not using the correct ABI for the contract you are retrieving data from, requesting data from a block number that does not exist, or querying a node which is not fully synced.
I do:
Setup in console:
npx hardhat node
>Started HTTP and WebSocket JSON-RPC server at http://127.0.0.1:8545/
>Accounts
>========
>...
npx hardhat compile
> Nothing to compile
npx hardhat run scripts/deploy.js --network hardhat
Note: In the deploy.js file, I do a
const list = await contract.getList();
console.log("list", list ); // correctly outputs ["string", "string"]
The method:
mapping(uint256 => address) internal list;
uint256 internal listCount;

function getList() public override view returns (address[] memory) {
    address[] memory assets = new address[](listCount);
    for (uint256 i = 0; i < listCount; i++) {
        assets[i] = list[i];
    }
    return assets;
}
In React App.js:
import Contract_ from './data/abi/Contract_.json'; // Contract_ is a placeholder

var contract = new web3.eth.Contract(Contract_, address_given_on_deploy);
var contractAddress = await contract.options.address; // correctly outputs
var list = await contract.methods.getList().call();
console.log("list", list);
As you see, this doesn't return the values from the method. What am I doing wrong here?
In case it's relevant, and it may well be the issue, here's my config:
require("#nomiclabs/hardhat-waffle");
// openzeppelin adds
require("#nomiclabs/hardhat-ethers");
require('#openzeppelin/hardhat-upgrades');
//abi
require('hardhat-abi-exporter');
// This is a sample Hardhat task. To learn how to create your own go to
// https://hardhat.org/guides/create-task.html
task("accounts", "Prints the list of accounts", async () => {
const accounts = await ethers.getSigners();
for (const account of accounts) {
console.log(account.address);
}
});
// You need to export an object to set up your config
// Go to https://hardhat.org/config/ to learn more
/**
 * @type import('hardhat/config').HardhatUserConfig
 */
module.exports = {
  networks: {
    hardhat: {
      gas: 12000000,
      blockGasLimit: 0x1fffffffffffff,
      allowUnlimitedContractSize: true,
      timeout: 1800000,
      chainId: 1337
    }
  },
  solidity: {
    compilers: [
      {
        version: "0.8.0",
        settings: {
          optimizer: {
            enabled: true,
            runs: 1000
          }
        }
      },
      {
        version: "0.8.2",
        settings: {
          optimizer: {
            enabled: true,
            runs: 1000
          }
        }
      },
    ],
  },
  abiExporter: {
    path: './frontend/src/data/abi',
    clear: true,
    flat: true,
    only: [],
    spacing: 2
  }
}
I thought maybe I would try ethers.js, since that is what I do my testing in, but I hit the same issue.
For whatever reason, I can "get" the contracts and print the methods that belong to them, but I can't actually call the methods.
Here's my ethers.js attempt, kept brief:
provider = new ethers.providers.Web3Provider(window.ethereum);

if (provider != null) {
  const _contract = new ethers.Contract(address, _Contract, provider);
  var list = await _contract.getList().call();
  console.log("list", list);
}
The error I get from this is:
Error: call revert exception (method="getList()", errorArgs=null, errorName=null, errorSignature=null, reason=null, code=CALL_EXCEPTION, version=abi/5.4.0)
I've tried numerous contracts in the protocol, and it's the same thing for each.

Node Elasticsearch - Bulk indexing not working - Content-Type header [application/x-ldjson] is not supported

Being new to Elasticsearch, I am exploring it by integrating it with Node, trying to execute the following online Git example on Windows.
https://github.com/sitepoint-editors/node-elasticsearch-tutorial
While trying to import the data of 1000 items from data.json, running 'node index.js' fails with the following error.
By enabling the trace, I now see the following as the root cause, coming from the bulk function:
"error": "Content-Type header [application/x-ldjson] is not supported",
** "status": 406**
I see a changelog entry at https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog.html which says the following:
13.0.0 (Apr 24 2017) bulk and other APIs that send line-delimited JSON bodies now use the Content-Type: application/x-ndjson header #507
Any idea how to resolve this content type issue in index.js?
index.js
(function () {
  'use strict';

  const fs = require('fs');
  const elasticsearch = require('elasticsearch');

  const esClient = new elasticsearch.Client({
    host: 'localhost:9200',
    log: 'error'
  });

  const bulkIndex = function bulkIndex(index, type, data) {
    let bulkBody = [];

    data.forEach(item => {
      bulkBody.push({
        index: {
          _index: index,
          _type: type,
          _id: item.id
        }
      });
      bulkBody.push(item);
    });

    esClient.bulk({body: bulkBody})
      .then(response => {
        console.log(`Inside bulk3...`);
        let errorCount = 0;
        response.items.forEach(item => {
          if (item.index && item.index.error) {
            console.log(++errorCount, item.index.error);
          }
        });
        console.log(`Successfully indexed ${data.length - errorCount} out of ${data.length} items`);
      })
      .catch(console.error); // note: console.err does not exist

  };

  // only for testing purposes
  // all calls should be initiated through the module
  const test = function test() {
    const articlesRaw = fs.readFileSync('data.json');
    const articles = JSON.parse(articlesRaw);
    console.log(`${articles.length} items parsed from data file`);
    bulkIndex('library', 'article', articles);
  };

  test();

  module.exports = {
    bulkIndex
  };
}());
My local Windows environment:
java version 1.8.0_121
elasticsearch version 6.1.1
node version v8.9.4
npm version 5.6.0
The bulk function doesn't return a promise. It accepts a callback function as a parameter.
esClient.bulk(
  {body: bulkBody},
  function (err, response) {
    if (err) { console.error(err); return; }
    console.log(`Inside bulk3...`);
    let errorCount = 0;
    response.items.forEach(item => {
      if (item.index && item.index.error) {
        console.log(++errorCount, item.index.error);
      }
    });
    console.log(`Successfully indexed ${data.length - errorCount} out of ${data.length} items`);
  }
)
Or use promisify to convert a function accepting an (err, value) => ... style callback into a function that returns a promise:
const util = require('util');

// bind, so the client's internal `this` survives promisification
const esClientBulk = util.promisify(esClient.bulk.bind(esClient));

esClientBulk({body: bulkBody})
  .then(...)
  .catch(...)
EDIT: Just found out that elasticsearch-js supports both callbacks and promises. So this should not be an issue.
By looking at the package.json of the project that you've linked, it uses elasticsearch-js version ^11.0.1, which is an old version that sends bulk upload requests with the application/x-ldjson header, which newer Elasticsearch versions do not support. Upgrading elasticsearch-js to a newer version (the current latest is 14.0.0) should fix it.
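For illustration, upgrading the client in the cloned tutorial project is a one-liner (version per the above; the newer client sends the supported Content-Type: application/x-ndjson header for bulk requests):
npm install --save elasticsearch@^14.0.0
After that, running node index.js again should index the items without the 406 error.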

How to respond with plain text in a FeathersJS websocket API?

I define a Feathers service API as below:
class Monitor {
  find(_) {
    const metrics = prom.register.metrics();
    log.info(metrics);
    return new Promise((resolve) => {
      resolve({text: metrics});
    });
  }
}

function restFormatter(req, res) {
  res.format({
    'text/plain': function () {
      log('xxxx:', res);
      res.end(`The Message is: "${res.data}"`);
    }
  });
}

module.exports = function () {
  const app = this;

  // Initialize our service with any options it requires
  const service = new Monitor();
  app.configure(rest(restFormatter)).use('/metrics', service);

  // Get our initialized service so that we can bind hooks
  const monitorService = app.service('/metrics');

  // Set up our before hooks
  monitorService.before(hooks.before);

  // Set up our after hooks
  monitorService.after(hooks.after);

  return service;
};

module.exports.Monitor = Monitor;
When calling this API from the browser, I get the below response:
"# HELP nodejs_gc_runs_total Count of total garbage collections.\n# TYPE nodejs_gc_runs_total counter\n\n# HELP nodejs_gc_pause_seconds_total Time spent in GC Pause in seconds.\n# TYPE nodejs_gc_pause_seconds_total counter\n\n# HELP nodejs_gc_reclaimed_bytes_total Total number of bytes reclaimed by GC.\n# TYPE nodejs_gc_reclaimed_bytes_total counter\n"
From the above output you can see that FeathersJS doesn't return the data in plain text format; it serializes my response text into a JSON string. Below is the output from a plain Express service shown in the browser:
# HELP nodejs_gc_runs_total Count of total garbage collections.
# TYPE nodejs_gc_runs_total counter
# HELP nodejs_gc_pause_seconds_total Time spent in GC Pause in seconds.
# TYPE nodejs_gc_pause_seconds_total counter
# HELP nodejs_gc_reclaimed_bytes_total Total number of bytes reclaimed by GC.
# TYPE nodejs_gc_reclaimed_bytes_total counter
# HELP newConnection The number of requests served
# TYPE newConnection counter
This output is what I really want. How can I make the FeathersJS service return the above output?
Below is my FeathersJS configuration:
app
  .use(compress())
  .options('*', cors())
  .use(cors())
  .use('/', serveStatic(app.get('public')))
  .use(bodyParser.json())
  .use(bodyParser.urlencoded({extended: true}))
  .configure(hooks())
  .configure(rest())
  .configure(
    swagger({
      docsPath: '/docs',
      uiIndex: path.join(__dirname, '../public/docs.html'),
      info: {
        title: process.env.npm_package_fullName,
        description: process.env.npm_package_description
      }
    })
  )
  .configure(
    primus(
      {
        transformer: 'websockets',
        timeout: false
      },
      (primus) => {
        primus.library();
        primus.save(path.join(__dirname, '../public/dist/primus.js'));
      }
    )
  )
  .configure(services)
  .configure(middleware);
You are configuring feathers-rest twice, which is why you still get the old output. Remove the app.configure(rest(restFormatter)) call from your service file. Then either change .configure(rest()) to .configure(rest(restFormatter)) in the main file, which applies the formatter to all services, or register a custom middleware that does the formatting just for that service:
app.use('/metrics', service, function (req, res) {
  res.format({
    'text/plain': function () {
      log('xxxx:', res);
      res.end(`The Message is: "${res.data}"`);
    }
  });
});
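For the first option (applying the formatter to all services), the change in the main configuration file would look along these lines; a sketch based on the configuration shown above, assuming restFormatter is imported or defined there:
app
  // ...compress, cors, serveStatic, bodyParser, hooks as above...
  .configure(rest(restFormatter)) // was .configure(rest())
  // ...swagger, primus, services, middleware as above...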

Possible to run Headless Chrome/Chromium in a Google Cloud Function?

Is there any way to run Headless Chrome/Chromium in a Google Cloud Function? I understand I can include and run statically compiled binaries in GCF. Can I get a statically compiled version of Chrome that would work for this?
The Node.js 8 runtime for Google Cloud Functions now includes all the necessary OS packages to run Headless Chrome.
Here is a code sample of an HTTP function that returns screenshots:
Main index.js file:
const puppeteer = require('puppeteer');

exports.screenshot = async (req, res) => {
  const url = req.query.url;

  if (!url) {
    return res.send('Please provide URL as GET parameter, for example: ?url=https://example.com');
  }

  const browser = await puppeteer.launch({
    args: ['--no-sandbox']
  });
  const page = await browser.newPage();
  await page.goto(url);
  const imageBuffer = await page.screenshot();
  await browser.close();

  res.set('Content-Type', 'image/png');
  res.send(imageBuffer);
}
and package.json
{
  "name": "screenshot",
  "version": "0.0.1",
  "dependencies": {
    "puppeteer": "^1.6.2"
  }
}
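Deploying it would then look something like this (a sketch; the function name matches the export above, and the memory setting is an assumption, since headless Chrome is memory-hungry):
gcloud functions deploy screenshot --runtime nodejs8 --trigger-http --memory 1024MB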
I've just deployed a GCF function running headless Chrome. A couple of takeaways:
1. you have to statically compile Chromium and NSS on Debian 8
2. you have to patch environment variables to point to NSS before launching Chromium
3. performance is much worse than what you'd get on AWS Lambda (3+ seconds)
For 1, you should be able to find plenty of instructions online.
For 2, the code that I'm using is the following:
static executablePath() {
  let bin = path.join(__dirname, '..', 'bin', 'chromium');
  let nss = path.join(__dirname, '..', 'bin', 'nss', 'Linux3.16_x86_64_cc_glibc_PTH_64_OPT.OBJ');

  // make sure the NSS tools and libraries are on the path before Chromium starts
  if (process.env.PATH === undefined) {
    process.env.PATH = path.join(nss, 'bin');
  } else if (process.env.PATH.indexOf(nss) === -1) {
    process.env.PATH = [path.join(nss, 'bin'), process.env.PATH].join(':');
  }

  if (process.env.LD_LIBRARY_PATH === undefined) {
    process.env.LD_LIBRARY_PATH = path.join(nss, 'lib');
  } else if (process.env.LD_LIBRARY_PATH.indexOf(nss) === -1) {
    process.env.LD_LIBRARY_PATH = [path.join(nss, 'lib'), process.env.LD_LIBRARY_PATH].join(':');
  }

  if (fs.existsSync('/tmp/chromium') === true) {
    return '/tmp/chromium';
  }

  // /tmp is the only writable location in GCF, so make the binary executable there
  return new Promise(
    (resolve, reject) => {
      try {
        fs.chmod(bin, '0755', () => {
          fs.symlinkSync(bin, '/tmp/chromium');
          return resolve('/tmp/chromium');
        });
      } catch (error) {
        return reject(error);
      }
    }
  );
}
You also need to use a few required arguments when starting Chrome (see the sketch after this list), namely:
--disable-dev-shm-usage
--disable-setuid-sandbox
--no-first-run
--no-sandbox
--no-zygote
--single-process
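A minimal sketch of passing these flags, assuming you launch via puppeteer and the executablePath() helper above (a plain Chromium launcher would take the same flags on its command line):
const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch({
    executablePath: await executablePath(), // the helper shown above
    args: [
      '--disable-dev-shm-usage',
      '--disable-setuid-sandbox',
      '--no-first-run',
      '--no-sandbox',
      '--no-zygote',
      '--single-process',
    ],
  });
  // ...use the browser, then clean up
  await browser.close();
})();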
I hope this helps.
As mentioned in the comment, work is being done on a possible solution to running a headless browser in a cloud function. A directly applicable discussion, "headless chrome & aws lambda", can be read on Google Groups.
The question at hand was: can you run headless Chrome or Chromium in Firebase Cloud Functions? The answer is NO, since the Node.js project will not have access to any Chrome/Chromium executables and will therefore fail (trust me, I've tried!).
A better solution is to use the phantom npm package, which uses PhantomJS under the hood:
https://www.npmjs.com/package/phantom
Docs and info can be found here:
http://amirraminfar.com/phantomjs-node/#/
or
https://github.com/amir20/phantomjs-node
The site I was trying to crawl had implemented screen-scraping countermeasures; the trick is to wait for the page to load by searching for an expected string or a regex match (in my case I search with a regex).
Here is a TypeScript snippet to make the HTTP or HTTPS call:
const phantom = require('phantom');
const instance: any = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
const page: any = await instance.createPage();
const status = await page.open('https://somewebsite.co.uk/');
const content = await page.property('content');
The same again in JavaScript:
const phantom = require('phantom');
const instance = yield phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
const page = yield instance.createPage();
const status = yield page.open('https://somewebsite.co.uk/');
const content = yield page.property('content');
That's the easy bit! If it's a static page, you're pretty much done and you can parse the HTML with something like the cheerio npm package: https://github.com/cheeriojs/cheerio - an implementation of core jQuery designed for servers!
However, if it is a dynamically loading page (lazy loading, or even anti-scraping methods), you will need to wait for the page to update by looping, calling the page.property('content') method, and running a text search or regex to see if your page has finished loading.
I have created a generic asynchronous function that returns the page content (as a string) on success and throws an exception on failure or timeout. It takes as parameters the page, text (a string to search for that indicates success), error (a string that indicates failure, or null to not check for errors), and timeout (a number, self-explanatory):
TypeScript:
// isNullOrUndefined comes from Node's 'util' module
async function waitForPageToLoadStr(page: any, text: string, error: string, timeout: number): Promise<string> {
  const maxTime = timeout ? (new Date()).getTime() + timeout : null;
  let html: string = '';
  html = await page.property('content');

  async function loop(): Promise<string> {
    async function checkSuccess(): Promise<boolean> {
      html = await page.property('content');
      if (!isNullOrUndefined(error) && html.includes(error)) {
        throw new Error(`Error string found: ${ error }`);
      }
      if (maxTime && (new Date()).getTime() >= maxTime) {
        throw new Error(`Timed out waiting for string: ${ text }`);
      }
      return html.includes(text);
    }

    if (await checkSuccess()) {
      return html;
    } else {
      return loop();
    }
  }

  return await loop();
}
JavaScript:
function waitForPageToLoadStr(page, text, error, timeout) {
  return __awaiter(this, void 0, void 0, function* () {
    const maxTime = timeout ? (new Date()).getTime() + timeout : null;
    let html = '';
    html = yield page.property('content');

    function loop() {
      return __awaiter(this, void 0, void 0, function* () {
        function checkSuccess() {
          return __awaiter(this, void 0, void 0, function* () {
            html = yield page.property('content');
            if (!isNullOrUndefined(error) && html.includes(error)) {
              throw new Error(`Error string found: ${error}`);
            }
            if (maxTime && (new Date()).getTime() >= maxTime) {
              throw new Error(`Timed out waiting for string: ${text}`);
            }
            return html.includes(text);
          });
        }

        if (yield checkSuccess()) {
          return html;
        } else {
          return loop();
        }
      });
    }

    return yield loop();
  });
}
I have personally used this function like this:
TypeScript:
try {
  const phantom = require('phantom');
  const instance: any = await phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
  const page: any = await instance.createPage();
  const status = await page.open('https://somewebsite.co.uk/');
  await waitForPageToLoadStr(page, '<div>Welcome to somewebsite</div>', '<h1>Website under maintenance, try again later</h1>', 1000);
} catch (error) {
  console.error(error);
}
JavaScript:
try {
  const phantom = require('phantom');
  const instance = yield phantom.create(['--ignore-ssl-errors=yes', '--load-images=no']);
  const page = yield instance.createPage();
  yield page.open('https://vehicleenquiry.service.gov.uk/');
  yield waitForPageToLoadStr(page, '<div>Welcome to somewebsite</div>', '<h1>Website under maintenance, try again later</h1>', 1000);
} catch (error) {
  console.error(error);
}
Happy crawling!

Unable to load app with protractor test runner

I am new to AngularJS. I'm trying to run end-to-end tests with Protractor. Currently, I am running my tests from grunt with help from grunt-protractor-runner. My base test looks like the following:
describe('My Tests', function () {
  var p = protractor.getInstance();

  beforeEach(function () {
  });

  it('My First Test', function () {
    var message = "Hello!";
    expect(message).toEqual('Hello!');
  });
});
This works just fine. However, it really doesn't test my app. To do that, I always want to start at the root of the app. In an attempt to do this, I've updated the above to the following:
describe('My Tests', function () {
  var p = protractor.getInstance();

  beforeEach(function () {
    p.get('#/');
  });

  it('My First Test', function () {
    var message = "Hello!";
    expect(message).toEqual('Hello!');
  });
});
When this test runs, Chrome launches. However, "about:blank" is what gets loaded in the address bar; my app never loads. I've reviewed my protractor.config.js file and it looks correct to me:
exports.config = {
  allScriptsTimeout: 110000,

  seleniumServerJar: './node_modules/protractor/bin/selenium/selenium-server-standalone-2.37.0.jar',
  seleniumPort: 1234,
  seleniumArgs: [],
  seleniumAddress: null,
  chromeDriver: './node_modules/protractor/bin/selenium/chromedriver.exe',

  capabilities: { 'browserName': 'chrome' },

  specs: [ '../tests/**/*.spec.js' ],

  jasmineNodeOpts: {
    showColors: true,
    defaultTimeoutInterval: 30000
  }
};
How do I get my app to load into Chrome for the purpose of an integration test via protractor?
Perhaps you've already figured out how to get it working, but if not, maybe the following will help (modify the port if necessary, of course):
// A base URL for your application under test. Calls to protractor.get()
// with relative paths will be prepended with this.
baseUrl: 'http://localhost:3000'
Add this property to your protractor.config.js file.
Reference: https://github.com/angular/protractor/blob/master/referenceConf.js
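With baseUrl in place, the relative p.get('#/') from the question resolves against it; a sketch assuming the app is served on port 3000:
exports.config = {
  // ...the settings shown above...
  baseUrl: 'http://localhost:3000'
};

// in the spec file, this now navigates to http://localhost:3000/#/
beforeEach(function () {
  p.get('#/');
});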