I have created a very complex build process for the front end of a web app, which is tested on AppVeyor. If some parts of the app are not built correctly with gulp, that is, if some gulp tasks fail, how do I signal to AppVeyor that the build has failed in its entirety?
To solve this problem, I used instructions from this article. I needed to separate the build process into two similar parts: one for the development environment and one for the production environment. The main difference was that the production build should always break if an error occurs in any of the tasks. Feodor Fitsner suggested that the process should exit with a non-zero exit code.
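In its simplest form, that suggestion boils down to calling process.exit(1) from an error handler; a minimal sketch (the task and file names here are only examples, not the final setup):

const gulp = require('gulp')
const sass = require('gulp-sass')

gulp.task('styles', () =>
  gulp.src('./scss/style.scss')
    .pipe(sass().on('error', (err) => {
      console.error(err.message)
      process.exit(1) // any non-zero exit code makes AppVeyor mark the build as failed
    }))
    .pipe(gulp.dest('./styles/'))
)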
Combining these two solutions, I created this small JS module that should be used as a wrapper for gulp tasks:
const msg = require('bit-message-box')
const chalk = require('chalk')

module.exports = (taskFn, production = false) => function (done) {
  let onSuccess = () => {
    done()
  }

  let onError = (err) => {
    if (production) {
      // If build process is initiated in production env, it should always break
      // on error with exit code higher than zero. This is especially important
      // for Appveyor CI
      msg.error(`ERROR! BUILD PROCESS ABORTED!`)
      console.error(chalk.bgRed.white(err))
      process.exit(1)
    }
    else { done() }
  }

  let outStream = taskFn(onSuccess, onError);
  if (outStream && typeof outStream.on === 'function') {
    outStream.on('end', onSuccess);
  }
}
Then, in gulp itself, you can import this module and use it in the following way:
const gulp = require('gulp')
const handleCI = require('./handleCI')
const sass = require('gulp-sass')

const PRODUCTION = true // use your own system to decide if this is true or false

gulp.task('styles', handleCI((success, error) => {
  return gulp.src('./scss/style.scss')
    .pipe(
      sass()
        .on('error', error) // Add this to handle errors
    )
    .pipe(
      gulp.dest('./styles/')
        .on('error', error)
    )
}, PRODUCTION))
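On the AppVeyor side nothing extra is needed beyond running the gulp task as a build step, so the non-zero exit code is picked up; a minimal appveyor.yml sketch (the task name matches the example above, everything else is an assumption):

install:
  - npm install
build_script:
  - npx gulp styles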
I need to pass the connection argument when calling lighthouse:
https://github.com/GoogleChrome/lighthouse/blob/master/lighthouse-core/index.js#L41
async function lighthouse(url, flags = {}, configJSON, connection) {
  // verify the url is valid and that protocol is allowed
  if (url && (!URL.isValid(url) || !URL.isProtocolAllowed(url))) {
    throw new LHError(LHError.errors.INVALID_URL);
  }

  // set logging preferences, assume quiet
  flags.logLevel = flags.logLevel || 'error';
  log.setLevel(flags.logLevel);

  const config = generateConfig(configJSON, flags);
  connection = connection || new ChromeProtocol(flags.port, flags.hostname);

  // kick off a lighthouse run
  return Runner.run(connection, {url, config});
}
And in my TestCafe suite my tests look like:
test('Run lighthouse', async t => {
  lighthouse('https://www.youtube.com', {}, {}, ????)
})
I am unable to retrieve the connection of the Chrome instance that TestCafe opened; I want to use that instead of spawning a new chromeRunner.
There is an npm library called testcafe-lighthouse that helps audit web pages using TestCafe. It can also produce a detailed HTML report.
Install the plugin by:
$ yarn add -D testcafe-lighthouse
# or
$ npm install --save-dev testcafe-lighthouse
Audit with default threshold
import { testcafeLighthouseAudit } from 'testcafe-lighthouse';

fixture(`Audit Test`).page('http://localhost:3000/login');

test('user performs lighthouse audit', async () => {
  const currentURL = await t.eval(() => document.documentURI);
  await testcafeLighthouseAudit({
    url: currentURL,
    cdpPort: 9222,
  });
});
Audit with custom thresholds:
import { testcafeLighthouseAudit } from 'testcafe-lighthouse';

fixture(`Audit Test`).page('http://localhost:3000/login');

test('user page performance with specific thresholds', async () => {
  const currentURL = await t.eval(() => document.documentURI);
  await testcafeLighthouseAudit({
    url: currentURL,
    thresholds: {
      performance: 50,
      accessibility: 50,
      'best-practices': 50,
      seo: 50,
      pwa: 50,
    },
    cdpPort: 9222,
  });
});
You need to kick-start the test like below:
# headless mode, preferable for CI
npx testcafe chrome:headless:cdpPort=9222 test.js
# non headless mode
npx testcafe chrome:emulation:cdpPort=9222 test.js
I hope it will help your automation journey.
I did something similar: I launch Lighthouse with Google Chrome on a specific port using the CLI:
npm run testcafe -- chrome:headless:cdpPort=1234
Then I make the lighthouse function take the port as an argument:
export default async function lighthouseAudit(url, browser_port) {
  let result = await lighthouse(url, {
    port: browser_port, // Google Chrome port number
    output: 'json',
    logLevel: 'info',
  });

  return result;
};
Then you can simply run the audit like this:
test(`Generate Light House Result`, async t => {
  const auditResult = await lighthouseAudit('https://www.youtube.com', 1234);
});
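If the test should also fail when a score drops, you could assert on the returned result; a sketch, assuming Lighthouse 3+ where result.lhr.categories.*.score is in the 0..1 range:

test(`Generate Light House Result`, async t => {
  const auditResult = await lighthouseAudit('https://www.youtube.com', 1234);

  // Scores are 0..1; fail the test below a chosen threshold
  const performanceScore = auditResult.lhr.categories.performance.score;
  await t.expect(performanceScore).gte(0.5);
});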
Hopefully it helps.
I am using gulp for a project and I added lighthouse to the gulp tasks like this:
gulp.task("lighthouse", function(){
return launchChromeAndRunLighthouse('http://localhost:3800', flags, perfConfig).then(results => {
console.log(results);
});
});
And this is my launchChromeAndRunLighthouse() function:
function launchChromeAndRunLighthouse(url, flags = {}, config = null) {
  return chromeLauncher.launch().then(chrome => {
    flags.port = chrome.port;
    return lighthouse(url, flags, config).then(results =>
      chrome.kill().then(() => results));
  });
}
It gives me the JSON output on the command line, and I can paste that JSON into the Lighthouse report viewer to get the report.
Is there any way I can generate the HTML report using gulp?
You are welcome to start a bounty if you think this question will be helpful for future readers.
The answer from @EMC is fine, but it requires multiple steps to generate the HTML from that point. However, you can use it like this (written in TypeScript; it should be very similar in JavaScript):
const { write } = await import(root('./node_modules/lighthouse/lighthouse-cli/printer'));
Then call it:
await write(results, 'html', 'report.html');
UPDATE
There have been some changes to the lighthouse repo. I now enable programmatic HTML reports as follows:
const { write } = await import(root('./node_modules/lighthouse/lighthouse-cli/printer'));
const reportGenerator = await import(root('./node_modules/lighthouse/lighthouse-core/report/report-generator'));
// ...lighthouse setup
const raw = await lighthouse(url, flags, config);
await write(reportGenerator.generateReportHtml(raw.lhr), 'html', root('report.html'));
I know it's hacky, but it solves the problem :).
I've run into this issue too. I found somewhere in the GitHub issues that you can't use the HTML option programmatically, but Lighthouse does expose the report generator, so you can write simple file-write and open functions around it to get the same effect.
const ReportGenerator = require('../node_modules/lighthouse/lighthouse-core/report/v2/report-generator.js');
I do
let opts = {
  chromeFlags: ['--show-paint-rects'],
  output: 'html'
};
...
const lighthouseResults = await lighthouse(urlToTest, opts, config = null);
and later
JSON.stringify(lighthouseResults.lhr)
to get the JSON, and
lighthouseResults.report.toString('UTF-8')
to get the HTML.
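To keep both outputs from a gulp run, a small sketch that writes them to disk (file names are arbitrary; it reuses the lighthouseResults object from above):

const fs = require('fs');

// persist both formats from the same run
fs.writeFileSync('lighthouse-report.json', JSON.stringify(lighthouseResults.lhr, null, 2));
fs.writeFileSync('lighthouse-report.html', lighthouseResults.report.toString('UTF-8'));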
You can define the preconfig in gulp as:
const preconfig = {
  logLevel: 'info',
  output: 'html',
  onlyCategories: ['performance', 'accessibility', 'best-practices', 'seo'],
  port: (new URL(browser.wsEndpoint())).port
};
The output option can be set to html, json, or csv. This preconfig is simply the configuration for Lighthouse, describing how we want it to run and what it should give us.
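For context, a hedged sketch of how such a preconfig might be wired up, assuming Chrome is launched through puppeteer and lighthouse is required directly:

const puppeteer = require('puppeteer');
const lighthouse = require('lighthouse');

async function runAudit(url) {
  // Chrome must expose a debugging port that Lighthouse can attach to
  const browser = await puppeteer.launch({ args: ['--remote-debugging-port=9222'] });

  const preconfig = {
    logLevel: 'info',
    output: 'html',
    onlyCategories: ['performance', 'accessibility', 'best-practices', 'seo'],
    port: (new URL(browser.wsEndpoint())).port
  };

  const result = await lighthouse(url, preconfig);
  await browser.close();
  return result; // result.report holds the HTML, result.lhr the raw data
}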
I'm struggling to get a working custom build for Polymer using gulp. My goal is to get a Polymer 1 project written in ES6 transpiled and bundled. I followed this guide: https://github.com/PolymerElements/generator-polymer-init-custom-build.
The transpilation works well for single files, but any bundled JS code is left untranspiled (still written in ES6). Here is my gulp task:
function build() {
  return new Promise((resolve, reject) => { // eslint-disable-line no-unused-vars
    // Okay, so first thing we do is clear the build directory
    console.log(`Deleting ${buildDirectory} directory...`);
    del([buildDirectory])
      .then(() => {
        // Okay, now let's get your source files
        let sourcesStream = polymerProject.sources()
          // Here's how splitHtml & gulpif work
          .pipe(polymerProject.splitHtml())
          // Transpile
          .pipe($.sourcemaps.init())
          .pipe($.if('*.js', $.babel({
            presets: ['es2015']
          })))
          .pipe($.sourcemaps.write())
          // Oh, well do you want to minify stuff? Go for it!
          .pipe(gulpif(/\.js$/, uglify()))
          .pipe(gulpif(/\.html$/, htmlMinifier()))
          .pipe(gulpif(/\.(png|gif|jpg|svg)$/, imagemin()))
          .pipe(polymerProject.rejoinHtml());

        // Okay, now let's do the same to your dependencies
        let dependenciesStream = polymerProject.dependencies()
          // .pipe(polymerProject.bundler)
          .pipe(polymerProject.splitHtml())
          .pipe(gulpif(/\.js$/, uglify()))
          .pipe(gulpif(/\.html$/, htmlMinifier()))
          .pipe(polymerProject.rejoinHtml());

        // Okay, now let's merge them into a single build stream
        let buildStream = mergeStream(sourcesStream, dependenciesStream)
          .once('data', () => {
            console.log('Analyzing build dependencies...');
          });

        // #PROBLEM# -> All included sources won't be transpiled
        buildStream = buildStream.pipe(polymerProject.bundler);

        // Okay, time to pipe to the build directory
        buildStream = buildStream.pipe(gulp.dest(buildDirectory));

        // waitFor the buildStream to complete
        return waitFor(buildStream);
      })
      .then(() => {
        // Okay, now let's generate the Service Worker
        console.log('Generating the Service Worker...');
        return polymerBuild.addServiceWorker({
          project: polymerProject,
          buildRoot: buildDirectory,
          bundled: true,
          swPrecacheConfig: swPrecacheConfig
        });
      })
      .then(() => {
        // You did it!
        console.log('Build complete!');
        resolve();
      });
  });
}

gulp.task('build', build);
Thank you for your help.
es2015 is the same as es6, so you are telling babel to transpile to es6. (I'm still looking for the correct preset name for es5)
https://codeburst.io/javascript-wtf-is-es6-es8-es-2017-ecmascript-dca859e4821c
"ES5
December 2009: Nearly 10 years later, ES5 was released in 2009. It would then take almost six years for the next version of ECMAScript to be released.
ES6 / ES2015
June 2015: Perhaps the cause for all of your confusion begins here. You see, ES6 and ES2015 are the same thing."
maybe that's a babel thing.
Using Babel 7 and gulp 4 (and web components / Polymer 3):
const polymerBuild = require('polymer-build');
const config = require('./polymer.json')
const project = new polymerBuild.PolymerProject(config);
const polymerProject = new polymerBuild.PolymerProject(config); // yes, two; I don't know why but it fails if I don't
const configBuild = require('gulp-polymer-build');
const babel = require("gulp-babel");

let build = (cb) => {
  return configBuild.createBuildStreams(polymerProject).then(builds => {
    let promises = [];

    for (let name in builds) {
      let dir = path.join(buildDir, name);

      builds[name]
        .pipe(gulpif(/\.js$/, babel({
          presets: [
            ['env', {
              "browserslist": "> 2%, ie 11, chrome 58, firefox 45"
            }]
          ],
          plugins: ["@babel/plugin-transform-modules-amd"]
        })))
        //.pipe(gulpif(/\.js$/, uglify()))
        .pipe(project.addCustomElementsEs5Adapter())
        .pipe(project.bundler())
        .pipe(dest(dir));

      promises.push(waitFor(builds[name]));
    }

    // ensure gulp waits for all streams to end
    Promise.all(promises).then(() => cb(), (e) => console.error("something went wrong: ", e));
  });
};

exports.build = build;
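Since the task is exported via exports.build, it can presumably be started with the gulp 4 CLI:

npx gulp build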
Then usage in HTML:
<head>
  <script src="/build/es5prod/array-polyfill.js"></script> <!-- from MDN https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/fill#Polyfill -->
  <script src="/node_modules/polymer-build/lib/babel-helpers-full.min.js"></script>
  <script src="/node_modules/@polymer/esm-amd-loader/lib/esm-amd-loader.min.js"></script>
  <script src="/node_modules/@webcomponents/webcomponentsjs/custom-elements-es5-adapter.js"></script>
  <script src="/node_modules/@webcomponents/webcomponentsjs/webcomponents-loader.js"></script>
  <!-- make sure you have the bundles dir also there -->
</head>
<script src="/app.js"></script> <!-- entry point to the js bundle -->
I want to run tasks in sequence, but I also want to pass data from one task to another.
Here is my code.
Gulp task 1 -> compile HTML
gulp.task('compile:html', function () {
  return gulp.src(['src/templates/*.html', 'src/templates/*.template'])
    .pipe(plumber())
    .pipe(nunjucksRender({
      data: templateConfig, // the environment is saved in templateConfig
      path: ['src/templates/']
    }))
    .pipe(gulp.dest('src/'));
});
Gulp Task 2 -> generate build
gulp.task('build:production', function (callback) {
  var temp = templateConfig.env;
  templateConfig.env = 'production';
  gulp.start('compile:html');

  setTimeout(function () {
    //gulp.src('./src/assets/**/*.*').pipe(gulp.dest('./generated/dist/production/assets'));
    gulp.src('src/*.html')
      .pipe(userRef())
      .pipe(gulpif('*.js', uglify()))
      .pipe(gulpif('*.css', minifyCss()))
      .pipe(gulp.dest('./dist/production'));

    templateConfig.env = temp;
    callback();
  }, 2500);
});
Currently I am using a timeout in task 2 to give the templates time to render according to the environment.
Task 2 (generate build) requirements:
change the environment in templateConfig, so the templates are generated accordingly
compile the templates using task 1
wait for the compilation to finish, which can take a long time
generate the build to dist
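One way to avoid the timeout (a sketch only, assuming the run-sequence plugin is installed and the same plugins as above) is to let run-sequence signal when compile:html has finished:

var runSequence = require('run-sequence');

gulp.task('build:production', function (callback) {
  var temp = templateConfig.env;
  templateConfig.env = 'production';

  // run compile:html first; the second callback fires only after it has finished
  runSequence('compile:html', function () {
    gulp.src('src/*.html')
      .pipe(userRef())
      .pipe(gulpif('*.js', uglify()))
      .pipe(gulpif('*.css', minifyCss()))
      .pipe(gulp.dest('./dist/production'))
      .on('end', function () {
        templateConfig.env = temp;
        callback();
      });
  });
});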
The problem: I want to maintain 'collections' of files. This will help with build times and flexibility. For example, every time I edit my app.js file, I don't want to re-compile all my Twitter Bootstrap files.
I can certainly achieve this with 2 tasks and 2 watch declarations. The problem is that the tasks are identical save for the files array. Ideally I would like to pass these through as parameters in the watch declaration. Is there a way to do something like the following pseudo-code?
var files = {
  scripts: [
    'www/assets/scripts/plugins/**/*.js',
    'www/assets/scripts/main.js',
  ],
  vendor: [
    'vendor/jquery/dist/jquery.js',
    'vendor/jqueryui/ui/jquery.ui.widget.js',
    'vendor/holderjs/holder.js'
  ],
};
...
gulp.task('js', ['lint'], function (files, output) {
  return gulp.src(files)
    .pipe(debug())
    .pipe(concat(output))
    .pipe(uglify({outSourceMap: true}))
    .pipe(gulp.dest(targetJSDir))
    .pipe(notify('JS minified'))
    .on('error', gutil.log)
});
...
gulp.watch('scripts/**/*.js', ['lint', 'js'], files.scripts, 'app.min.js');
gulp.watch('vendor/**/*.js', ['lint', 'js'], files.vendor, 'vendor.min.js');
Flipping it around another way: could I namespace the watch declaration that called the task? That way I could check which watch triggered the task, and handle those things conditionally within the task itself.
the problem is that the tasks are identical save for the files array.
I believe lazypipe (see its GitHub page) is well suited to your needs. This was an interesting problem. I'm going to try to answer both what I think you're asking about (which is satisfied by lazypipe) and what I think you're probably thinking about, or would end up thinking about, once you got past the parameterization-of-pipes issue.
One aspect of what we want is that we don't want to rerun jshint on files that haven't changed. Additionally, we want to keep it DRY, and we want to pick up new files in addition to changed ones.
This is tested and works for me:
var gulp = require('gulp');
var $ = require('gulp-load-plugins')();
var es = require('event-stream');
var lazypipe = require('lazypipe');
var gutil = require('gulp-util');
var path = require('path');

var files = {
  scripts: ['src/**/*.js'],
  vendor: ['vendor/**/*.js']
};

// sets up a lazy pipe that does jshint related stuff
function getJsMultiPipe(name) {
  return lazypipe()
    .pipe($.jshint)
    .pipe($.jshint.reporter, 'jshint-stylish')
    // if you don't want to fail on style errors remove/comment this out:
    .pipe($.jshint.reporter, 'fail');
}

// sets up a lazy pipe that does concat and post-concat stuff
function getJsCombinedPipe(groupName, outfile) {
  return lazypipe()
    .pipe($.concat, outfile)
    .pipe($.uglify, {outSourceMap: true})
    .pipe(gulp.dest, 'build')
    .pipe($.notify, {message: groupName + ' JS minified', onLast: true});
}

// sets up a pipe for the initial build task, combining the above two pipes
function getBuildPipe(groupName, outfile) {
  return gulp.src(files[groupName])
    .pipe(getJsMultiPipe(groupName)())
    .pipe(getJsCombinedPipe(groupName, outfile)());
}

// sets up a watch pipe, such that only the changed file is jshinted,
// but all files are included in the concat steps
function setWatchPipe(groupName, outfile) {
  return $.watch({
    glob: files[groupName],
    name: groupName,
    emitOnGlob: false,
    emit: 'one'
  }, function(file, done) {
    return file
      .pipe($.debug({title: 'watch -- changed file'}))
      .pipe(getJsMultiPipe(groupName)())
      // switch context
      .pipe(gulp.src(files[groupName]))
      .pipe($.debug({title: 'watch -- entire group'}))
      .pipe(getJsCombinedPipe(groupName, outfile)())
      .pipe($.debug({title: 'watch -- concatted/source-mapped'}))
      .pipe($.notify({message: 'JS minified', onLast: true}));
  });
}

// task to do an initial full build
gulp.task('build', function() {
  return es.merge(
      getBuildPipe('scripts', 'app.min.js'),
      getBuildPipe('vendor', 'vendor.min.js')
    )
    .pipe($.notify({message: 'JS minified', onLast: true}));
});

// task to do an initial full build and then set up watches for
// incremental change
gulp.task('watch', ['build'], function(done) {
  setWatchPipe('scripts', 'app.min.js');
  setWatchPipe('vendor', 'vendor.min.js');
  done();
});
My dependencies look like:
"devDependencies": {
"jshint-stylish": "^0.1.5",
"gulp-concat": "^2.2.0",
"gulp-uglify": "^0.2.1",
"gulp-debug": "^0.3.0",
"gulp-notify": "^1.2.5",
"gulp-jshint": "^1.5.3",
"gulp": "^3.6.0",
"gulp-load-plugins": "^0.5.0",
"lazypipe": "^0.2.1",
"event-stream": "^3.1.1",
"gulp-util": "^2.2.14",
"gulp-watch": "^0.5.3"
}
EDIT: I just glanced at this again and I notice these lines:
// switch context
.pipe(gulp.src(files[groupName]))
Be aware that I believe the gulp.src API has changed since I wrote this, and that it currently doesn't switch the context when you pipe things into gulp.src, so this spot might require a change. For newer versions of gulp, I think what will happen is that you will be adding to the context instead, presumably losing a small bit of efficiency.
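If that context switch no longer behaves as described in newer gulp versions, one possible workaround (an assumption on my part, using the gulp-add-src plugin rather than gulp.src) would be to add the rest of the group to the stream explicitly:

var addsrc = require('gulp-add-src');

// inside the watch callback, replacing the .pipe(gulp.src(files[groupName])) line:
return file
  .pipe($.debug({title: 'watch -- changed file'}))
  .pipe(getJsMultiPipe(groupName)())
  .pipe(addsrc(files[groupName])) // pull the whole group back into the stream
  .pipe(getJsCombinedPipe(groupName, outfile)());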
You could write a wrapper function for tasks to capture parameters and pass them to the task, e.g. (with the help of the lodash library):
// We capture the options in this object. We use gulp.env as a base such that
// options from the cli are also passed to the task.
var currentOpts = _.clone(gulp.env);

// Here we define a function that wraps a task such that it can receive
// an options object
function parameterized(taskFunc) {
  return function() {
    taskFunc.call(null, currentOpts);
  }
}

// Here we create a function that can be used by gulp.watch to call
// a parameterized task. It can be passed an object of "task" : {options} pairs
// and it will return a task function that will capture these options
// before invoking the task.
function withArgs(tasks) {
  return function() {
    _.each(tasks, function (opts, task) {
      currentOpts = _.extend(currentOpts, opts);
      gulp.run(task);
      currentOpts = _.clone(gulp.env);
    });
  }
}

var files = {
  scripts: ["src/**/*.js"],
  vendor: ["vendor/**/*.js"]
};

// We pass the task function to parameterized. This will create a wrapper
// function that will pass an options object to the actual task function
gulp.task("js", parameterized(function(opts) {
  gulp.src(files[opts.target])
    .pipe(concat(opts.output));
}));

gulp.task("watch", function() {
  // The withArgs function creates a watch function that invokes
  // tasks with an options argument.
  // In this case it will invoke the js task with the options object
  // { target : "scripts", output : "scripts.min.js" }
  gulp.watch(files.scripts, withArgs({
    js: {
      target: "scripts",
      output: "scripts.min.js"
    }
  }));
  gulp.watch(files.vendor, withArgs({
    js: {
      target: "vendor",
      output: "vendor.min.js"
    }
  }));
});
I've faced the same problem: how to pass parameters to a gulp task. It's weird that this feature is not built in; building, for instance, two versions of a package is such a common task that a parametrized task seems like a very DRY solution.
I wanted to make it as simple as possible, so my solution was to dynamically create a task for each possible parameter value. It works OK if you have a small number of exactly defined values; it won't work for wide-ranging values like ints or floats.
The task definition is wrapped in a function that takes the desired parameter, and the parameter is appended to the task's name (with a '$' in between for convenience).
Your code could look like this:
function construct_js(myname, files, output) {
  gulp.task('js$' + myname, ['lint'], function () {
    return gulp.src(files)
      .pipe(debug())
      .pipe(concat(output))
      .pipe(uglify({outSourceMap: true}))
      .pipe(gulp.dest(targetJSDir))
      .pipe(notify('JS minified'))
      .on('error', gutil.log)
  });
}

construct_js("app", files.scripts, 'app.min.js');
construct_js("vendor", files.vendor, 'vendor.min.js');

gulp.watch('scripts/**/*.js', ['lint', 'js$app']);
gulp.watch('vendor/**/*.js', ['lint', 'js$vendor']);
Or better, with a little change in the data definition, we can invoke the task generation in a loop (so if you add a new "version" to the configs array, it will work right away):
var configs = [
  {
    name: "app",
    output: 'app.min.js',
    files: [
      'www/assets/scripts/plugins/**/*.js',
      'www/assets/scripts/main.js',
    ]
  },
  {
    name: "vendor",
    output: 'vendor.min.js',
    files: [
      'vendor/jquery/dist/jquery.js',
      'vendor/jqueryui/ui/jquery.ui.widget.js',
      'vendor/holderjs/holder.js'
    ]
  }
];

function construct_js(taskConfig) {
  gulp.task('js$' + taskConfig.name, ['lint'], function () {
    return gulp.src(taskConfig.files)
      .pipe(debug())
      .pipe(concat(taskConfig.output))
      .pipe(uglify({outSourceMap: true}))
      .pipe(gulp.dest(targetJSDir))
      .pipe(notify('JS minified'))
      .on('error', gutil.log)
  });
}

for (var i = 0; i < configs.length; i++) {
  construct_js(configs[i]);
}
If we use Underscore for the last for loop:
_(configs).each(construct_js);
I've used this approach in my project with good results.
I'd like to propose some alternatives. Suppose we have a task called build that we would like to conditionally uglify given a certain param.
The two approaches use two watches with a single build task.
Alternative #1:
You can use exec (from Node's child_process module) to fire up a task with parameters:
var exec = require('child_process').exec;

gulp.task('build', function () {
  // Parse args here and determine whether to uglify or not
});

gulp.task('buildWithoutUglify', function () {
  exec('gulp build --withoutUglify');
});

gulp.task('watch', function () {
  gulp.watch(someFilePath, ['buildWithoutUglify']);
});
Please note that this approach is a bit slow since it shells out to the gulp command line.
Alternative #2:
Set a global variable:
var withUglify = true;

gulp.task('build', function () {
  // Use something like ``gulp-if`` to conditionally uglify based on the flag.
});

gulp.task('buildWithoutUglify', function () {
  withUglify = false;
  gulp.start('build');
});

gulp.task('watch', function () {
  gulp.watch(someFilePath, ['buildWithoutUglify']);
});