Gulp.js: pass only the saved file on to a task

I want to pass a saved file, which does not need to be compiled or saved to a new location, to gulp-tap so I can run an external script on it.
Right now I watch a complete directory, and on each save I upload the whole directory:
gulp.task('shopify_theme', function(){
    gulp.src('./theme/**/*.liquid')
        .pipe(tap(function(file){
            upload(file);
        }));
});
And this is the upload part (theme is an application that uploads assets to Shopify):
var upload = function(file){
    var splitPath = file.path.split('theme/').pop();
    run('theme upload ' + splitPath, { cwd: 'theme' }).exec();
};
Every time I save a .liquid file in the /theme directory, all files matching theme/**/*.liquid are uploaded. gulp-changed doesn't work because, at the time the task runs, the destination and the source are the same.
What's the best way to only upload the changed file?

You can use gulp.watch to watch for changes to individual files:
var upload = function(filePath){
    var splitPath = filePath.split('theme/').pop();
    run('theme upload ' + splitPath, { cwd: 'theme' }).exec();
};

gulp.watch('./theme/**/*.liquid', function(evnt) {
    upload(evnt.path);
});
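This callback signature is the gulp 3 API, where the watch callback receives an event object. If you happen to be on gulp 4, the same idea works through the chokidar watcher that gulp.watch returns; a minimal sketch, assuming gulp 4:
var watcher = gulp.watch('./theme/**/*.liquid');

// chokidar emits the changed path directly
watcher.on('change', function(path) {
    upload(path);
});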


how to download a file automatically without the print pop-up (Node.js)

I'm making a project where, when the page opens, a PDF file is automatically downloaded. I managed to use this:
window.addEventListener('load', () => {
    window.print();
});
but I want the file to be downloaded directly to a directory I have defined, for example D:/myproject, as soon as the window opens.
Is there any way? I don't use a PDF library because I build the PDF with CSS myself.
Thank you
window is not accessible in server-side code.
If you want the file to be downloaded in the browser as soon as the web page is opened, you can use res.download() as follows:
app.get('/download', function(req, res){
    const file = `${__dirname}/upload-folder/file_name.pdf`;
    res.download(file); // Set disposition and send it.
});
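If you also want to control the name the browser saves the file under, res.download() accepts an optional filename and a completion callback; a small sketch (the filename report.pdf is just an example):
app.get('/download', function(req, res){
    const file = `${__dirname}/upload-folder/file_name.pdf`;
    // Second argument is the filename suggested to the browser (example value).
    res.download(file, 'report.pdf', function(err){
        if (err) {
            console.error('Download failed:', err);
        }
    });
});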
As you want to download the file into a specific directory, you can use the npm module download-file:
var download = require('download-file');

app.get('/download', function(req, res){
    var url = __dirname + "/upload-folder/file_name.pdf";
    var options = {
        directory: "path of directory/",
        filename: "file_name.pdf"
    };
    download(url, options, function(err){
        if (err) throw err;
        res.send("Done");
    });
});
Edit: to convert the HTML code into a PDF you can use the npm module jspdf.
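A rough, browser-side sketch of that idea, assuming the jsPDF 2.x UMD build (which exposes a jspdf global and needs html2canvas available for the html() method); the filename here is a placeholder:
window.addEventListener('load', function () {
    var doc = new jspdf.jsPDF();
    // Render the current page's HTML into the PDF, then save it,
    // which triggers the download in the browser.
    doc.html(document.body, {
        callback: function (doc) {
            doc.save('file_name.pdf');
        }
    });
});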

validate CSV files in Grunt build

How can I validate CSV files (encoding, headline, delimiter, column count) in my Grunt build? I had a look at CSVLint but could neither get it to work nor figure out how to include it in Grunt.
Edit: PapaParse looks promising, but has no Grunt integration either.
Although Grunt integration for PapaParse does not exist, its API can be utilized by configuring a custom Function Task inside your Gruntfile.js.
Install papaparse via npm
Firstly, cd to your project directory, install papaparse via npm, and add it to the devDependencies section of your project's package.json. To do this run the following command via your CLI tool:
$ npm i -D papaparse
Gruntfile.js
The following gist shows how to configure a custom Function Task named validateCSV in your Gruntfile.js.
module.exports = function(grunt) {

    // Requirements
    var fs = require('fs');
    var Papa = require('papaparse');

    // Other project configuration tasks.
    grunt.initConfig({
        // ...
    });

    /**
     * Register a custom Function task to validate .csv files using Papa Parse.
     */
    grunt.registerTask('validateCSV', 'Lint .csv files via Papa Parse', function() {
        var glob = './csv/*.csv'; // <-- Note: Edit glob pattern as required.
        var success = true;

        // Create an Array of all .csv files using the glob pattern provided.
        var csvFiles = grunt.file.expand(glob).map(function(file) {
            return file;
        });

        // Report if no .csv files were found and return early.
        if (csvFiles.length === 0) {
            grunt.log.write('No .csv files were found');
            return;
        }

        // Loop over each .csv file in the csvFiles Array.
        csvFiles.forEach(function(csvFile) {
            // Read the contents of the .csv file.
            var csvString = fs.readFileSync(csvFile, {
                encoding: 'utf8'
            });

            // Parse the .csv contents via Papa Parse.
            var papa = Papa.parse(csvString, {
                delimiter: ',',
                newline: '',
                quoteChar: '"',
                header: true,
                skipEmptyLines: true
                // For additional config options visit:
                // http://papaparse.com/docs#config
            });

            // Basic error and success logging.
            if (papa.errors.length > 0) {
                grunt.log.error('Error(s) in file: '['red'] + csvFile['red']);
                // Report each error for a single .csv file.
                // For additional Papa Parse errors visit:
                // http://papaparse.com/docs#errors
                papa.errors.forEach(function(error) {
                    grunt.log.write('\n type: ' + error.type);
                    grunt.log.write('\n code: ' + error.code);
                    grunt.log.write('\n message: ' + error.message);
                    grunt.log.write('\n row: ' + error.row + '\n\n');
                });
                // Indicate that a .csv file failed validation.
                success = false;
            } else {
                grunt.log.ok('No errors found in file: ' + csvFile);
            }
        });

        // If errors are found in any of the .csv files this will
        // prevent subsequent defined tasks from being processed.
        if (!success) {
            grunt.fail.warn('Error(s) were found when validating .csv files');
        }
    });

    // Register the custom Function task as part of the default task.
    grunt.registerTask('default', [
        'validateCSV'
        // ...
    ]);
};
Notes
The following line of code (taken from the Gruntfile.js above) that reads:
var glob = './csv/*.csv';
... will need to be changed/edited according to your project requirements. Currently the globbing pattern assumes all .csv files reside inside a folder named csv.
You may also need to set the config options as per your requirements.
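For instance, if your files happened to be semicolon-delimited with Windows line endings, the parse options inside the task could be adjusted along these lines (the values shown are only an example):
var papa = Papa.parse(csvString, {
    delimiter: ';',   // example: semicolon-separated values
    newline: '\r\n',  // example: Windows line endings
    quoteChar: '"',
    header: true,
    skipEmptyLines: true
});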
The custom Function Task also includes some basic error and success reporting that will be logged to the CLI.
Running the Task
To run the grunt task simply execute the following via your CLI tool:
$ grunt validateCSV
EDIT: Updated Answer (based on the following comment...)
Would it also be possible to "configure" the task from within the
grunt.initConfig()? For example linting different CSV directories?
To achieve this you can create a separate JavaScript module that exports a registered MultiTask.
Let's call it papaparse.js and save it to a directory named custom-grunt-tasks, which resides in the same top-level directory as your Gruntfile.js.
Note: This .js file and directory name can be any name that you prefer, however you will need to update the references inside Gruntfile.js.
papaparse.js
module.exports = function(grunt) {

    'use strict';

    // Requirements
    var fs = require('fs');
    var Papa = require('papaparse');

    grunt.registerMultiTask('papaparse', 'Misc Tasks', function() {

        // Default options. These are used when no options are
        // provided via the initConfig({...}) papaparse task.
        var options = this.options({
            quotes: false,
            delimiter: ',',
            newline: '',
            quoteChar: '"',
            header: true,
            skipEmptyLines: true
        });

        // Loop over each path provided via the src array.
        this.data.src.forEach(function(dir) {

            // Append a forward slash if a directory path
            // provided does not end with one.
            if (dir.slice(-1) !== '/') {
                dir += '/';
            }

            // Generate the globbing pattern.
            var glob = [dir, '*.csv'].join('');

            // Create an Array of all .csv files using the glob pattern.
            var csvFiles = grunt.file.expand(glob).map(function(file) {
                return file;
            });

            // Report if no .csv files were found and return early.
            if (csvFiles.length === 0) {
                grunt.log.write(
                    '>> No .csv files found using the globbing '['yellow'] +
                    'pattern: '['yellow'] + glob['yellow']
                );
                return;
            }

            // Loop over each .csv file in the csvFiles Array.
            csvFiles.forEach(function(csvFile) {
                var success = true;

                // Read the contents of the .csv file.
                var csvString = fs.readFileSync(csvFile, {
                    encoding: 'utf8'
                });

                // Parse the .csv contents via Papa Parse.
                var papa = Papa.parse(csvString, options);

                // Basic error and success logging.
                if (papa.errors.length > 0) {
                    grunt.log.error('Error(s) in file: '['red'] + csvFile['red']);
                    // Report each error for a single .csv file.
                    // For additional Papa Parse errors visit:
                    // http://papaparse.com/docs#errors
                    papa.errors.forEach(function(error) {
                        grunt.log.write('\n type: ' + error.type);
                        grunt.log.write('\n code: ' + error.code);
                        grunt.log.write('\n message: ' + error.message);
                        grunt.log.write('\n row: ' + error.row + '\n\n');
                    });
                    // Indicate that a .csv file failed validation.
                    success = false;
                } else {
                    grunt.log.ok('No errors found in file: ' + csvFile);
                }

                // If errors are found in any of the .csv files this will prevent
                // subsequent files and defined tasks from being processed.
                if (!success) {
                    grunt.fail.warn('Error(s) found when validating .csv files');
                }
            });
        });
    });
};
Gruntfile.js
Your Gruntfile.js can then be configured something like this:
module.exports = function(grunt) {

    grunt.initConfig({
        // ...
        papaparse: {
            setOne: {
                src: ['./csv/', './csv2']
            },
            setTwo: {
                src: ['./csv3/'],
                options: {
                    skipEmptyLines: false
                }
            }
        }
    });

    // Load the custom multiTask named `papaparse` - which is defined in
    // `papaparse.js` stored in the directory named `custom-grunt-tasks`.
    grunt.loadTasks('./custom-grunt-tasks');

    // Register and add papaparse to the default Task.
    grunt.registerTask('default', [
        'papaparse' // <-- This runs Targets named setOne and setTwo
        // ...
    ]);

    // `papaparse.js` allows for multiple targets to be defined, so
    // you can use the colon notation to just run one Target.
    // The following only runs the setTwo Target.
    grunt.registerTask('processOneTarget', [
        'papaparse:setTwo'
        // ...
    ]);
};
Running the Task
The papaparse Task has been added to the taskList Array of the default Task, so it can be executed by entering the following via your CLI tool:
$ grunt
Notes
Running the example gist by entering $ grunt via your CLI will process all .csv files inside the directories named csv, csv2, and csv3.
Running $ grunt processOneTarget via your CLI will process only .csv files inside the directory named csv3.
As papaparse.js utilizes a MultiTask, you'll notice that the papaparse Task defined in Gruntfile.js includes two Targets, namely setOne and setTwo.
The setOne Target's src Array defines paths to two directories that should be processed, i.e. the directories ./csv/ and ./csv2. All .csv files found in these paths will be processed using the default papaparse options defined in papaparse.js, as the Target does not define any custom options.
The setTwo Target's src Array defines a path to one directory (i.e. ./csv3/). All .csv files found in this path will be processed using the default papaparse options defined in papaparse.js, with the exception of the skipEmptyLines option, which is set to false.
You may find that simply defining one Target in Gruntfile.js with multiple paths in the src Array, without any custom options, meets your requirements. For example:
// ...
grunt.initConfig({
    // ...
    papaparse: {
        myTask: {
            src: ['./csv/', './csv2', './csv3']
        }
    }
    // ...
});
// ...
// ...
Hope this helps!

aws lambda nodejs - error when uploading a zip file compressed by Gulp

I'm using Gulp to build a zip file and then upload it to AWS Lambda. Uploading the zip file is done manually; only the compression step is handled by Gulp.
Here is my gulpfile.js:
var gulp = require('gulp');
var zip = require('gulp-zip');
var del = require('del');
var install = require('gulp-install');
var runSequence = require('run-sequence');
var awsLambda = require("node-aws-lambda");

gulp.task('clean', function() {
    return del(['./dist', './dist.zip']);
});

gulp.task('js', function() {
    return gulp.src('index.js')
        .pipe(gulp.dest('dist/'));
});

gulp.task('npm', function() {
    return gulp.src('./package.json')
        .pipe(gulp.dest('dist/'))
        .pipe(install({production: true}));
});

gulp.task('zip', function() {
    return gulp.src(['dist/**/*', '!dist/package.json'])
        .pipe(zip('dist.zip'))
        .pipe(gulp.dest('./'));
});

gulp.task('deploy', function(callback) {
    return runSequence(
        ['clean'],
        ['js', 'npm'],
        ['zip'],
        callback
    );
});
After running the deploy task, a zip file named dist.zip is created, consisting of an index.js file and a node_modules folder. The node_modules folder contains only the lodash library.
This is index.js:
var _ = require('lodash');

console.log('Loading function');

exports.handler = (event, context, callback) => {
    //console.log('Received event:', JSON.stringify(event, null, 2));
    var b = _.chunk(['a', 'b', 'c', 'd', 'e'], 3);
    console.log(b);
    callback(null, event.key1); // Echo back the first key value
    //callback('Something went wrong');
};
After uploading dist.zip via the AWS Lambda console, there is an error saying that the lodash library cannot be found:
{
    "errorMessage": "Cannot find module 'lodash'",
    "errorType": "Error",
    "stackTrace": [
        "Function.Module._load (module.js:276:25)",
        "Module.require (module.js:353:17)",
        "require (internal/module.js:12:17)",
        "Object.<anonymous> (/var/task/index.js:1:71)",
        "Module._compile (module.js:409:26)",
        "Object.Module._extensions..js (module.js:416:10)",
        "Module.load (module.js:343:32)",
        "Function.Module._load (module.js:300:12)",
        "Module.require (module.js:353:17)"
    ]
}
But inside the zip file there is a node_modules directory that contains the lodash lib:
dist.zip
|--- node_modules
|    |--- lodash
|--- index.js
When I zip the node_modules directory and the index.js file manually, it works fine.
Does anyone have an idea what's wrong? Maybe when compressing with Gulp the library paths are misconfigured?
I had the same problem a few days back.
Everyone pointed to gulp-zip, however it was not a problem with gulp-zip.
The below worked fine:
gulp
    .src(['sourceDir/**'], {nodir: true, dot: true})
    .pipe(zip('target.zip'))
    .pipe(gulp.dest('build/'));
Note the second parameter of src in the above:
{nodir: true, dot: true}
That is, we have to include dot files in the zip (e.g. .config, .abc, etc.).
So include the above in the .src of gulp, otherwise other operations like copy, zip, etc. will be improper.
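Applied to the zip task from the question, that suggestion would look roughly like this sketch:
gulp.task('zip', function() {
    // nodir drops directory entries, dot includes hidden files
    return gulp.src(['dist/**/*', '!dist/package.json'], { nodir: true, dot: true })
        .pipe(zip('dist.zip'))
        .pipe(gulp.dest('./'));
});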
The package gulp-zip is massively popular (4.3k downloads per day) and there does not seem to be any Gulp substitute. The problem is definitely with relative paths and how gulp-zip processes them. Even when using a base path option in the gulp.src function (example below), gulp-zip finds a way to mess it up.
gulp.task("default", ["build-pre-zip"], function () {
return gulp.src([
"dist/**/*"
], { base: "dist/" })
.pipe(debug())
.pipe(zip("dist.zip"))
.pipe(gulp.dest("./dist/"));
});
Since there is no good Gulp solution as of 1/4/2017, I suggest a work-around. I use Gulp to populate the dist folder first, exactly how I need it, with the proper node_modules folder. Then it is time to zip the dist folder properly, with relative file paths stored. To do that and also update Lambda, I use a batch file (Windows) of command-line calls to get the job done. Here is the upload.bat file I created to take the place of the gulp-zip task:
start /wait cmd /c "gulp default"
start /wait cmd /c "C:\Program Files\WinRAR\WinRAR.exe" a -r -ep1 dist\dist.zip dist\*.*
aws lambda update-function-code --zip-file fileb://dist/dist.zip --function-name your-fn-name-here
If you use WinRAR you will find their command line docs here, for WinZip go here. That .bat file assumes you are using the AWS Command Line Interface (CLI) which is a godsend; get it here.
If you are wishing this answer pointed you towards a 100% Gulp solution, to that I say, "You and me both!". Good luck.

how to modify config files using gulp

I use gulp to configure a complex local setup and need to auto-edit files.
The scenario is:
determine if a certain file contains certain lines after a certain other line (found using a regular expression)
if the line is not found, insert the line.
optionally, delete some lines found in the file.
I need this to amend system configuration files and compile scenarios.
What would be the best way to do it in gulp?
Gulp is plain JavaScript, so what I would do if I were you is create a plugin to pipe to the original config file.
Gulp streams emit Vinyl files, so all you really have to do is create a "pipe factory" that transforms the objects.
It would look something like this (using event-stream):
var es = require('event-stream');

// you could receive params in here if you're using the same
// plugin in different occasions.
function fixConfigFile() {
    return es.map(function(file, cb) {
        var fileContent = file.contents.toString();

        // determine if certain file contains certain lines...
        // if line is not found, insert the line.
        // optionally, delete some lines found in the file.

        // update the vinyl file
        file.contents = Buffer.from(fileContent);

        // send the updated file down the pipe
        cb(null, file);
    });
}

gulp.task('fix-config', function() {
    return gulp.src('path/to/original/*.config')
        .pipe(fixConfigFile())
        .pipe(gulp.dest('path/to/fixed/configs'));
});
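As a rough illustration of what could go where the placeholder comments sit, the map callback might split the contents into lines and do the checks there. The regular expression and the inserted/removed lines below are hypothetical placeholders, not anything from the question:
var lines = fileContent.split('\n');
var markerRe = /^\[mysection\]/;          // line to search for (assumed)
var lineToInsert = 'enable_feature=true'; // line that must follow it (assumed)
var lineToDelete = 'legacy_option=1';     // line to remove (assumed)

var markerIndex = lines.findIndex(function(line) {
    return markerRe.test(line);
});

// Insert the line right after the marker if it is not already there.
if (markerIndex !== -1 && lines[markerIndex + 1] !== lineToInsert) {
    lines.splice(markerIndex + 1, 0, lineToInsert);
}

// Optionally drop unwanted lines.
lines = lines.filter(function(line) {
    return line !== lineToDelete;
});

fileContent = lines.join('\n');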
Or you can use vinyl-map:
const map = require('vinyl-map');
const gulp = require('gulp');

const modify = map((contents, filename) => {
    contents = contents.toString();
    // modify contents somehow
    return contents;
});

gulp.task('modify', () =>
    gulp.src(['./index.js'])
        .pipe(modify)
        .pipe(gulp.dest('./dist'))
);

creating tar archives using gulp

I'm using gulp-tar to create a tar file... how do I add a top-level folder so that when the user runs tar -xzf myArchive.tar it extracts into a specific folder?
here's my code:
gulp.task('prod', ['min', 'gittag'], function() {
    //copy all files under /server into a zip file
    gulp.src('../server/**/*')
        .pipe(tar('xoserver' + '-' + gittag + '.tar'))
        .pipe(gzip())
        .pipe(gulp.dest('../prod'));
});
The above creates a tar.gz file all right, but I have to be careful to add -C <folder> while extracting, or else the files get extracted into the current folder.
[edited]
What I'm trying to do here is generate a tarball of the format xoserver-alpha-d414ddf.tar.gz which, when extracted with tar xvf, will create a folder xoserver-alpha-d414ddf and unpack all the files under it. Essentially I am trying to add a new folder name above my packed files.
If I add a base option, the folder extracted to is just server
[ANSWER]
Thanks to ddprrt for a good answer. I am reproducing the final code in case someone else wants to use a similar strategy of embedding the git tag into the name of the tarball for distribution/testing.
gulp.task('gittag', function(cb) { // generate the git tag
    git.exec({args: 'branch -v'}, function(err, stdout) {
        var lines = stdout.split('\n');
        for (var l in lines) {
            if (lines[l][0] == '*') {
                var words = lines[l].split(/\s+/);
                gittag = words[1] + '-' + words[2];
                console.log('Gittag is %s', gittag);
                break;
            }
        }
        cb();
    });
});

gulp.task('min', ['runbmin', 'template', 'vendor']); // generate min files

gulp.task('prod', ['min', 'gittag'], function() { // create tarball
    //copy all files under /server into a zip file
    return gulp.src('../server/**/*')
        .pipe(rename(function(path) {
            path.dirname = 'server-' + gittag + '/' + path.dirname;
        }))
        .pipe(tar('xoserver-' + gittag + '.tar'))
        .pipe(gzip())
        .pipe(gulp.dest('../prod'));
});
This is what the base option is for.
gulp.task('prod', ['min', 'gittag'], function() {
    return gulp.src('../server/**/*', { base: '../server/' })
        .pipe(tar('xoserver' + '-' + gittag + '.tar'))
        .pipe(gzip())
        .pipe(gulp.dest('../prod'));
});
With it you can tell gulp which paths to include when dealing with the globs you receive.
Btw, don't forget to return streams or call the done callback in your tasks; it helps gulp orchestrate your build pipeline.
As for the second question, you can use the gulp-rename plugin to change the directory where your virtual files are located. It would be something like:
.pipe(rename(function(path) {
    path.dirname = 'whatever/' + path.dirname;
}));