I'm using gulp-tar to create a tar file... how do I add a top-level folder so that when the user runs tar -xzf myArchive.tar it extracts into a specific folder?
Here's my code:
gulp.task('prod', ['min', 'gittag'], function() {
    // copy all files under /server into a gzipped tarball
    gulp.src('../server/**/*')
        .pipe(tar('xoserver' + '-' + gittag + '.tar'))
        .pipe(gzip())
        .pipe(gulp.dest('../prod'));
});
The above creates a tar.gz file all right, but I have to be careful to add a -C <folder> while extracting, or else the files get extracted into the current folder.
[edited]
What I'm trying to do here is generate a tarball of the format xoserver-alpha-d414ddf.tar.gz which, when extracted with tar xvf, will create a folder xoserver-alpha-d414ddf and unpack all the files under it. Essentially I am trying to add a new folder name above my packed files.
If I add a base option, the folder it extracts into is just server.
[ANSWER]
Thanks to ddprrt for a good answer. I am reproducing the final code in case someone else wants to use a similar strategy of embedding the git tag into the name of the tarball for distribution/testing.
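The snippets below assume the usual plugin requires and a shared gittag variable, roughly as follows (the plugin names are the standard npm packages, shown here only for completeness):
var gulp = require('gulp');
var git = require('gulp-git');       // provides git.exec()
var rename = require('gulp-rename');
var tar = require('gulp-tar');
var gzip = require('gulp-gzip');

var gittag; // set by the 'gittag' task, read by 'prod'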
gulp.task('gittag', function(cb) { // generate the git tag
    git.exec({args: 'branch -v'}, function(err, stdout) {
        var lines = stdout.split('\n');
        for (var l in lines) {
            if (lines[l][0] == '*') {
                var words = lines[l].split(/\s+/);
                gittag = words[1] + '-' + words[2];
                console.log('Gittag is %s', gittag);
                break;
            }
        }
        cb();
    });
});
gulp.task('min', ['runbmin', 'template', 'vendor']); // generate min files
gulp.task('prod', ['min', 'gittag'], function() { // create tarball
    // copy all files under /server into the gzipped tarball
    return gulp.src('../server/**/*')
        .pipe(rename(function(path) {
            path.dirname = 'server-' + gittag + '/' + path.dirname;
        }))
        .pipe(tar('xoserver-' + gittag + '.tar'))
        .pipe(gzip())
        .pipe(gulp.dest('../prod'));
});
This is what the base option is for.
gulp.task('prod', ['min', 'gittag'], function() {
    return gulp.src('../server/**/*', { base: '../server/' })
        .pipe(tar('xoserver' + '-' + gittag + '.tar'))
        .pipe(gzip())
        .pipe(gulp.dest('../prod'));
});
With it you can tell gulp which part of each file's path to keep when dealing with the globs you receive.
Btw. don't forget to return streams or call the done callback in your tasks; it helps gulp orchestrate your build pipeline.
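For instance, a task that does non-stream work can signal completion through the callback gulp hands it; a generic sketch (file name and content are placeholders, not from the question):
var gulp = require('gulp');
var fs = require('fs');

gulp.task('write_version', function(done) {
    // async work that is not a stream: tell gulp when it has finished
    fs.writeFile('version.txt', 'v1.0.0', function(err) {
        done(err);
    });
});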
As for the second question, you can use the gulp-rename plugin to change the directory where your virtual files are located. It would be something like
.pipe(rename(function(path) {
    path.dirname = 'whatever/' + path.dirname;
}));
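Putting the two together gives the layout you're after; a sketch that reuses the gittag variable from the question (not code from my own gulpfile):
gulp.task('prod', ['min', 'gittag'], function() {
    return gulp.src('../server/**/*', { base: '../server/' })
        .pipe(rename(function(path) {
            path.dirname = 'xoserver-' + gittag + '/' + path.dirname;
        }))
        .pipe(tar('xoserver-' + gittag + '.tar'))
        .pipe(gzip())
        .pipe(gulp.dest('../prod'));
});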
Related
How can I validate CSV files (encoding, headline, delimiter, column count) in my Grunt build? I had a look at CSVLint but could neither get it to work nor figure out how to include it in Grunt.
Edit: PapaParse looks promising, but has no Grunt integration either.
Although Grunt integration for PapaParse does not exist, its API can be utilized by configuring a custom Function Task inside your Gruntfile.js.
Install papaparse via npm
Firstly, cd to your project directory, install papaparse via npm, and add it to the devDependencies section of your project's package.json. To do this, run the following command via your CLI tool:
$ npm i -D papaparse
Gruntfile.js
The following gist shows how to configure a custom Function Task named validateCSV in your Gruntfile.js.
module.exports = function(grunt) {

    // Requirements
    var fs = require('fs');
    var Papa = require('papaparse');

    // Other project configuration tasks.
    grunt.initConfig({
        // ...
    });

    /**
     * Register a custom Function task to validate .csv files using Papa Parse.
     */
    grunt.registerTask('validateCSV', 'Lint .csv files via Papa Parse', function() {
        var glob = './csv/*.csv'; // <-- Note: Edit glob pattern as required.
        var success = true;

        // Create an Array of all .csv files using the glob pattern provided.
        var csvFiles = grunt.file.expand(glob).map(function(file) {
            return file;
        });

        // Report if no .csv files were found and return early.
        if (csvFiles.length === 0) {
            grunt.log.write('No .csv files were found');
            return;
        }

        // Loop over each .csv file in the csvFiles Array.
        csvFiles.forEach(function(csvFile) {
            // Read the contents of the .csv file.
            var csvString = fs.readFileSync(csvFile, {
                encoding: 'utf8'
            });

            // Parse the .csv contents via Papa Parse.
            var papa = Papa.parse(csvString, {
                delimiter: ',',
                newline: '',
                quoteChar: '"',
                header: true,
                skipEmptyLines: true
                // For additional config options visit:
                // http://papaparse.com/docs#config
            });

            // Basic error and success logging.
            if (papa.errors.length > 0) {
                grunt.log.error('Error(s) in file: '['red'] + csvFile['red']);
                // Report each error for a single .csv file.
                // For additional Papa Parse errors visit:
                // http://papaparse.com/docs#errors
                papa.errors.forEach(function(error) {
                    grunt.log.write('\n type: ' + error.type);
                    grunt.log.write('\n code: ' + error.code);
                    grunt.log.write('\n message: ' + error.message);
                    grunt.log.write('\n row: ' + error.row + '\n\n');
                });
                // Indicate that a .csv file failed validation.
                success = false;
            } else {
                grunt.log.ok('No errors found in file: ' + csvFile);
            }
        });

        // If errors are found in any of the .csv files this will
        // prevent subsequent defined tasks from being processed.
        if (!success) {
            grunt.fail.warn('Error(s) were found when validating .csv files');
        }
    });

    // Add the custom Function Task to the default Task.
    grunt.registerTask('default', [
        'validateCSV'
        // ...
    ]);
};
Notes
The following line of code (taken from the Gruntfile.js above) that reads:
var glob = './csv/*.csv';
... will need to be changed/edited according to your project requirements. Currently the globbing pattern assumes all .csv files reside inside a folder named csv.
You may also need to set the config options as per your requirements.
The custom Function Task also includes some basic error and success reporting that will be logged to the CLI.
Running the Task
To run the grunt task simply execute the following via your CLI tool:
$ grunt validateCSV
EDIT: Updated Answer (based on the following comment...)
Would it also be possible to "configure" the task from within the
grunt.initConfig()? For example linting different CSV directories?
To achieve this you can create a separate JavaScript module that exports a registered MultiTask.
Let's call it papaparse.js and save it to a directory named custom-grunt-tasks, which resides in the same top-level directory as your Gruntfile.js.
Note: This .js file and directory name can be any name that you prefer, however you will need to update the references inside Gruntfile.js.
papaparse.js
module.exports = function(grunt) {

    'use strict';

    // Requirements
    var fs = require('fs');
    var Papa = require('papaparse');

    grunt.registerMultiTask('papaparse', 'Misc Tasks', function() {

        // Default options. These are used when no options are
        // provided via the initConfig({...}) papaparse task.
        var options = this.options({
            quotes: false,
            delimiter: ',',
            newline: '',
            quoteChar: '"',
            header: true,
            skipEmptyLines: true
        });

        // Loop over each path provided via the src array.
        this.data.src.forEach(function(dir) {

            // Append a forward slash if a directory path
            // provided does not end with one.
            if (dir.slice(-1) !== '/') {
                dir += '/';
            }

            // Generate the globbing pattern.
            var glob = [dir, '*.csv'].join('');

            // Create an Array of all .csv files using the glob pattern.
            var csvFiles = grunt.file.expand(glob).map(function(file) {
                return file;
            });

            // Report if no .csv files were found and return early.
            if (csvFiles.length === 0) {
                grunt.log.write(
                    '>> No .csv files found using the globbing '['yellow'] +
                    'pattern: '['yellow'] + glob['yellow']
                );
                return;
            }

            // Loop over each .csv file in the csvFiles Array.
            csvFiles.forEach(function(csvFile) {
                var success = true;

                // Read the contents of the .csv file.
                var csvString = fs.readFileSync(csvFile, {
                    encoding: 'utf8'
                });

                // Parse the .csv contents via Papa Parse.
                var papa = Papa.parse(csvString, options);

                // Basic error and success logging.
                if (papa.errors.length > 0) {
                    grunt.log.error('Error(s) in file: '['red'] + csvFile['red']);
                    // Report each error for a single .csv file.
                    // For additional Papa Parse errors visit:
                    // http://papaparse.com/docs#errors
                    papa.errors.forEach(function(error) {
                        grunt.log.write('\n type: ' + error.type);
                        grunt.log.write('\n code: ' + error.code);
                        grunt.log.write('\n message: ' + error.message);
                        grunt.log.write('\n row: ' + error.row + '\n\n');
                    });
                    // Indicate that a .csv file failed validation.
                    success = false;
                } else {
                    grunt.log.ok('No errors found in file: ' + csvFile);
                }

                // If errors are found in any of the .csv files this will prevent
                // subsequent files and defined tasks from being processed.
                if (!success) {
                    grunt.fail.warn('Error(s) found when validating .csv files');
                }
            });
        });
    });
};
Gruntfile.js
Your Gruntfile.js can then be configured something like this:
module.exports = function(grunt) {

    grunt.initConfig({
        // ...
        papaparse: {
            setOne: {
                src: ['./csv/', './csv2']
            },
            setTwo: {
                src: ['./csv3/'],
                options: {
                    skipEmptyLines: false
                }
            }
        }
    });

    // Load the custom multiTask named `papaparse` - which is defined in
    // `papaparse.js` stored in the directory named `custom-grunt-tasks`.
    grunt.loadTasks('./custom-grunt-tasks');

    // Register and add papaparse to the default Task.
    grunt.registerTask('default', [
        'papaparse' // <-- This runs Targets named setOne and setTwo
        // ...
    ]);

    // `papaparse.js` allows for multiple targets to be defined, so
    // you can use the colon notation to just run one Target.
    // The following only runs the setTwo Target.
    grunt.registerTask('processOneTarget', [
        'papaparse:setTwo'
        // ...
    ]);
};
Running the Task
The papaparse Task has been added to the taskList Array of the default Task, so it can be executed by entering the following via your CLI tool:
$ grunt
Notes
Running the example gist by entering $ grunt via your CLI will process all .csv files inside the directories named csv, csv2, and csv3.
Running $ grunt processOneTarget via your CLI will process only .csv files inside the directory named csv3.
As papaparse.js utilizes a MultiTask, you'll notice that the papaparse Task defined in Gruntfile.js includes two Targets, namely setOne and setTwo.
The setOne Target's src Array defines paths to two directories that should be processed, i.e. ./csv/ and ./csv2. All .csv files found in these paths will be processed using the default papaparse options defined in papaparse.js, as the Target does not define any custom options.
The setTwo Target's src Array defines a path to one directory (i.e. ./csv3/). All .csv files found in this path will be processed using the default papaparse options defined in papaparse.js, with the exception of the skipEmptyLines option, as it is set to false.
You may find that simply defining one Target in Gruntfile.js with multiple paths in the src Array, without any custom options, meets your requirements. For example:
// ...
grunt.initConfig({
    // ...
    papaparse: {
        myTask: {
            src: ['./csv/', './csv2', './csv3']
        }
    }
    // ...
});
// ...
Hope this helps!
I'm using Gulp to compress files into a zip archive and then upload it to AWS Lambda. Uploading the zip file is done manually; only the compression step is handled by Gulp.
Here is my gulpfile.js
var gulp = require('gulp');
var zip = require('gulp-zip');
var del = require('del');
var install = require('gulp-install');
var runSequence = require('run-sequence');
var awsLambda = require("node-aws-lambda");

gulp.task('clean', function() {
    return del(['./dist', './dist.zip']);
});

gulp.task('js', function() {
    return gulp.src('index.js')
        .pipe(gulp.dest('dist/'));
});

gulp.task('npm', function() {
    return gulp.src('./package.json')
        .pipe(gulp.dest('dist/'))
        .pipe(install({production: true}));
});

gulp.task('zip', function() {
    return gulp.src(['dist/**/*', '!dist/package.json'])
        .pipe(zip('dist.zip'))
        .pipe(gulp.dest('./'));
});

gulp.task('deploy', function(callback) {
    return runSequence(
        ['clean'],
        ['js', 'npm'],
        ['zip'],
        callback
    );
});
After running the deploy task, a zip file named dist.zip is created, consisting of an index.js file and a node_modules folder. The node_modules folder contains only the lodash library.
This is index.js
var _ = require('lodash');
console.log('Loading function');
exports.handler = (event, context, callback) => {
    //console.log('Received event:', JSON.stringify(event, null, 2));
    var b = _.chunk(['a', 'b', 'c', 'd', 'e'], 3);
    console.log(b);
    callback(null, event.key1); // Echo back the first key value
    //callback('Something went wrong');
};
After using the AWS Lambda console to upload dist.zip, there is an error showing that the lodash library cannot be found:
{
    "errorMessage": "Cannot find module 'lodash'",
    "errorType": "Error",
    "stackTrace": [
        "Function.Module._load (module.js:276:25)",
        "Module.require (module.js:353:17)",
        "require (internal/module.js:12:17)",
        "Object.<anonymous> (/var/task/index.js:1:71)",
        "Module._compile (module.js:409:26)",
        "Object.Module._extensions..js (module.js:416:10)",
        "Module.load (module.js:343:32)",
        "Function.Module._load (module.js:300:12)",
        "Module.require (module.js:353:17)"
    ]
}
But in the zip file, there is a node_modules directory that contains the lodash lib.
dist.zip
|---node_modules
|--- lodash
|---index.js
When I zip the node_modules directory and the index.js file manually, it works fine.
Does anyone have an idea what's wrong? Maybe when compressing using Gulp, there is something misconfigured with the lib path?
I had the same problem a few days back.
Everyone pointed to gulp-zip; however, it was not a problem with gulp-zip.
The below worked fine:
gulp
    .src(['sourceDir/**'], { nodir: true, dot: true })
    .pipe(zip('target.zip'))
    .pipe(gulp.dest('build/'));
That is, note the second parameter of src in the above:
{ nodir: true, dot: true }
That is, we have to include dot files in the zip (e.g. .config, .abc, etc.).
So include the above in gulp.src, otherwise everything else like copy, zip, etc. will come out improper.
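Applied to a dist folder like the one in the question, that would look something like this (a sketch; the globs are taken from the question's own zip task):
var gulp = require('gulp');
var zip = require('gulp-zip');

gulp.task('zip', function() {
    return gulp.src(['dist/**/*', '!dist/package.json'], { nodir: true, dot: true })
        .pipe(zip('dist.zip'))
        .pipe(gulp.dest('./'));
});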
The package gulp-zip is massively popular (4.3k downloads per day) and there does not seem to be any Gulp substitute. The problem is definitely with relative paths and how gulp-zip processes them. Even when using a base path option in the gulp.src function (example below), gulp-zip finds a way to mess it up.
gulp.task("default", ["build-pre-zip"], function () {
return gulp.src([
"dist/**/*"
], { base: "dist/" })
.pipe(debug())
.pipe(zip("dist.zip"))
.pipe(gulp.dest("./dist/"));
});
Since there is no good Gulp solution as of 1/4/2017 I suggest a work-around. I use Gulp to populate the dist folder first, exactly how I need it with the proper node_modules folder. Then it is time to zip the dist folder properly with relative file paths stored. To do that and also update Lambda, I use a batch file (Windows) of command line options to get the job done. Here is the upload.bat file I created to take the place of the gulp-zip task:
start /wait cmd /c "gulp default"
start /wait cmd /c "C:\Program Files\WinRAR\WinRAR.exe" a -r -ep1 dist\dist.zip dist\*.*
aws lambda update-function-code --zip-file fileb://dist/dist.zip --function-name your-fn-name-here
If you use WinRAR you will find their command line docs here; for WinZip go here. That .bat file assumes you are using the AWS Command Line Interface (CLI), which is a godsend; get it here.
If you are wishing this answer pointed you towards a 100% Gulp solution, to that I say, "You and me both!". Good luck.
I'm trying to build multiple libs for a single application using gulp.
I have this directory structure
dist/            # expected result
    lib1.js
    lib2.js
src/
    libs/
        lib1/
            ...  # js files for lib1
        lib2/
            ...  # js files for lib2
    wrap/        # wrappers, one per lib
        lib1.js
        lib2.js
And I wrote this gulp task, using through2:
gulp.task('build', function() {
    var dn = 'azerty'; // whatever
    return gulp.src('src/libs/**/*.js')
        .pipe(through.obj(function(file, enc, cb) {
            // for extracting a lib's dirname
            var str = path.dirname(file.path),
                parts = str.split('/'),
                rev = parts.reverse();
            dn = rev[0];
            util.log(dn); // all is ok here...
            cb();
        }))
        .pipe(concat(dn + '.js'))
        .pipe(wrap({ src: 'src/wrap/' + dn + '.js' }))
        .pipe(gulp.dest('dist'));
});
But it doesn't work properly; the value of the dn variable seems to be lost at the concat point. This is very strange because dn is a global variable relative to the task.
How can I work around this? By using gulp-foreach or something else?
Regards.
EDIT: it is sadly the same with gulp-foreach... does someone have an idea how to solve this?
I want to pass a saved file (one which does not need to be compiled or saved to a new location) to gulp-tap so I can run an external script on it.
Right now I watch a complete directory and on each save I upload the whole directory:
gulp.task('shopify_theme', function() {
    gulp.src('./theme/**/*.liquid')
        .pipe(tap(function(file) {
            upload(file);
        }));
});
And this is the upload part (theme is an application that uploads assets to Shopify):
var upload = function(file) {
    var splitPath = file.path.split('theme/').pop();
    run('theme upload ' + splitPath, { cwd: 'theme' }).exec();
};
Every time I save a liquid file in the /theme directory, all files (theme/**/*.liquid) are uploaded. gulp-changed doesn't work because, at the time the task runs, the destination and the source are the same.
What's the best way to only upload the changed file?
You can use gulp.watch to watch for changes to individual files:
var upload = function(filePath) {
    var splitPath = filePath.split('theme/').pop();
    run('theme upload ' + splitPath, { cwd: 'theme' }).exec();
};

gulp.watch('./theme/**/*.liquid', function(evnt) {
    upload(evnt.path);
});
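To start the watcher from the CLI you can wrap it in a task; a self-contained sketch (the task name is a placeholder, and the requires mirror the question's setup, which uses gulp-run's run()):
var gulp = require('gulp');
var run = require('gulp-run'); // assumed: the question does not show its requires

var upload = function(filePath) {
    var splitPath = filePath.split('theme/').pop();
    run('theme upload ' + splitPath, { cwd: 'theme' }).exec();
};

gulp.task('watch_theme', function() {
    gulp.watch('./theme/**/*.liquid', function(evnt) {
        upload(evnt.path);
    });
});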
Just learning Gulp. Looks great, but I can't find any information on how to make a complete distribution with it.
Let's say I want to use Gulp to concatenate and minify my CSS and JS, and optimise my images.
In doing so I change the location of JS scripts in my build directory (eg. from bower_components/jquery/dist/jquery.js to js/jquery.js).
How do I automatically update my build HTML/PHP documents to reference the correct files? What is the standard way of doing this?
How do I copy over the rest of my project files? These are files that need to be included as part of the distribution, such as HTML, PHP, various txt, JSON and all sorts of other files. Surely I don't have to copy and paste those from my development directory each time I do a clean build with Gulp?
Sorry for asking what are probably very n00bish questions. It's possible I should be using something other than Gulp to manage these, but I'm not sure where to start.
Many thanks in advance.
Point #1
The way I used to achieve this:
// Assumed requires (not shown in the answer):
var Stream = require('stream');
var path = require('path');
var fs = require('fs');

var scripts = [];
function getScriptStream(dir) { // Find it as a gulp module or create it
    var devT = new Stream.Transform({objectMode: true});
    devT._transform = function(file, unused, done) {
        scripts.push(path.relative(dir, file.path));
        this.push(file);
        done();
    };
    return devT;
}
// Note: in this gulpfile, g is presumably gulp-load-plugins(), and src, build,
// prod, buffer and server are config variables defined elsewhere.

// Bower
gulp.task('build_bower', function() {
    var jsFilter = g.filter('**/*.js');
    var ngFilter = g.filter(['!**/angular.js', '!**/angular-mocks.js']);
    return g.bowerFiles({
        paths: {
            bowerDirectory: src.vendors
        },
        includeDev: !prod
    })
        .pipe(ngFilter)
        .pipe(jsFilter)
        .pipe(g.cond(prod, g.streamify(g.concat.bind(null, 'libs.js'))))
        .pipe(getScriptStream(src.html))
        .pipe(jsFilter.restore())
        .pipe(ngFilter.restore())
        .pipe(gulp.dest(build.vendors));
});
// JavaScript
gulp.task('build_js', function() {
    return gulp.src(src.js + '/**/*.js', {buffer: buffer})
        .pipe(g.streamify(g.jshint))
        .pipe(g.streamify(g.jshint.reporter.bind(null, 'default')))
        .pipe(g.cond(prod, g.streamify(g.concat.bind(null, 'app.js'))))
        .pipe(g.cond(
            prod,
            g.streamify.bind(null, g.uglify),
            g.livereload.bind(null, server)
        ))
        .pipe(gulp.dest(build.js))
        .pipe(getScriptStream(build.html));
});
// HTML
gulp.task('build_html', ['build_bower', 'build_js', 'build_views',
    'build_templates'], function() {
    fs.writeFile('scripts.json', JSON.stringify(scripts));
    return gulp.src(src.html + '/index.html', {buffer: true})
        .pipe(g.replace(/(^\s+)<!-- SCRIPTS -->\r?\n/m, function($, $1) {
            return $ + scripts.map(function(script) {
                return $1 + '<script type="text/javascript" src="' + script + '"></script>';
            }).join('\n') + '\n';
        }))
        .pipe(gulp.dest(build.html));
});
It has the advantage of concatenating and minifying everything for production while including every file for testing purposes, keeping error line numbers coherent.
Point #2
Copying files with gulp is just as simple as doing this:
gulp.src(path).pipe(gulp.dest(buildPath));
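So for the HTML, PHP, txt, JSON and other static files, a small dedicated task is enough; a sketch with illustrative globs and folder names (adjust to your layout):
var gulp = require('gulp');

gulp.task('copy_static', function() {
    return gulp.src('src/**/*.{html,php,txt,json}', { base: 'src' })
        .pipe(gulp.dest('dist'));
});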
Bonus
I generally proceed to deployment by creating a "build" branch and just cloning it on the production server. I created buildbranch for that matter:
// Publish task
gulp.task('publish', function(cb) {
    buildBranch({
        branch: 'build',
        ignore: ['.git', '.token', 'www', 'node_modules']
    }, function(err) {
        if (err) {
            throw err;
        }
        cb();
    });
});
To loosely answer my own question, several years later:
How do I automatically update my build HTML/PHP documents to reference the correct files? What is the standard way of doing this?
Always link to the dist version, but ensure sourcemaps are created, so the source is easy to debug. Of course, the watch task is a must.
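A minimal sketch of that idea (the plugins are the common gulp-sourcemaps, gulp-concat and gulp-uglify packages; paths are illustrative):
var gulp = require('gulp');
var sourcemaps = require('gulp-sourcemaps');
var concat = require('gulp-concat');
var uglify = require('gulp-uglify');

gulp.task('js', function() {
    return gulp.src('src/js/**/*.js')
        .pipe(sourcemaps.init())
        .pipe(concat('app.js'))      // pages always reference dist/js/app.js
        .pipe(uglify())
        .pipe(sourcemaps.write('.')) // app.js.map keeps the sources debuggable
        .pipe(gulp.dest('dist/js'));
});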
How do I copy over the rest of my project files? These are files that need to be included as part of the distribution, such as HTML, PHP, various txt, JSON and all sorts of other files. Surely I don't have to copy and paste those from my development directory each time I do a clean build with Gulp?
This usually isn't a problem, as there aren't that many such files. Large files and configuration are often kept out of the repo, besides.