What is the right way to use sharp with gulp?

I am trying to convert a bunch of SVGs to PNGs.
But due to the sad state of gulp-svg2png and gulp-sharp I'm evaluating other options; sharp looks promising enough.
Versions:
$ gulp --version
CLI version: 2.2.0
Local version: 4.0.2
$ node
Welcome to Node.js v13.6.0.
Type ".help" for more information.
> require('./package.json').dependencies.sharp
'^0.24.0'
Unfortunately, the following
gulp.task('svg-convert', function() {
  var sharp = require('sharp');
  return gulp.src(config.svg2png)
    .pipe(sharp().png())
    .pipe(gulp.dest('./dist/images'));
});
sends this error:
TypeError [ERR_INVALID_ARG_TYPE]: The "chunk" argument must be of type string or an instance of Buffer. Received an instance of File
at validChunk (_stream_writable.js:285:10)
at Sharp.Writable.write (_stream_writable.js:324:23)
...
(stacktrace continues)
Has someone solved this?
Or is there a better way to deal with this particular need?
Edited after @AllainLG's answer.
These are my two takes.
Unfortunately both are failing.
gulp.task('svg2png', function() {
  var sharp = require('sharp');
  return gulp.src(c.svg2png)
    .pipe(plug.each(function(content, file, callback) {
      var newContent = sharp(content).png(); // here content is a buffer containing the image; newContent should be too?
      // var newContent = sharp(content).png().toBuffer(); // this sends a Promise and fails
      return callback(null, newContent);
    }, 'buffer'))
    .pipe(plug.rename(function(path) {
      return path.extname = ".png";
    }))
    .pipe(plug.imagemin())
    .pipe(gulp.dest('./dist/images'));
});
This pipes nothing to the destination: nothing in ./dist/images.
This second attempt uses gulp-tap.
gulp.task('svg2png2', function() {
  var sharp = require('sharp');
  return gulp.src(c.svg2png)
    .pipe(plug.tap(function(file) {
      return file.contents = sharp(file.contents).png();
    }))
    .pipe(plug.rename(function(path) {
      return path.extname = ".png";
    }))
    // .pipe(plug.imagemin()) // sends "Streaming not supported"
    .pipe(gulp.dest('./dist/images'));
});
This generates empty files in the destination (with the right names).

The following works, with a minor bonus: it is roughly 100 ms faster than gulp-svg2png (~430 ms vs ~540 ms averaged over 10 complex SVGs).
It uses through2.
I'll accept this answer in around a month unless someone has a better answer.
gulp.task('svg2png', function() {
  var sharp = require('sharp');
  var through2 = require('through2');
  return gulp.src(c.svg2png)
    .pipe(through2.obj(function(file, _, cb) {
      return sharp(file.contents).png().toBuffer().then(function(buffer) {
        file.contents = buffer;
        return cb(null, file);
      }).catch(function(err) {
        console.error(err);
        return cb(null, file);
      });
    }))
    .pipe(plug.rename(function(path) {
      return path.extname = ".png";
    }))
    .pipe(plug.imagemin())
    .pipe(gulp.dest('./dist/images'));
});

sharp's first parameter needs to be a string or a Buffer; gulp won't pass these directly.
You can use gulp-each to call sharp on each file (or write your own simple transform), as in the sketch below.
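For illustration, a minimal sketch of the gulp-each approach (the glob and destination paths are placeholders), using sharp's callback-style toBuffer() so the converted PNG Buffer is handed back asynchronously:
var gulp = require('gulp');
var each = require('gulp-each');
var sharp = require('sharp');

gulp.task('svg2png', function() {
  return gulp.src('src/images/*.svg')
    .pipe(each(function(content, file, callback) {
      // content is a Buffer with the SVG data; pass the PNG Buffer
      // back through the callback once sharp has finished converting.
      sharp(content).png().toBuffer(function(err, buffer) {
        callback(err, buffer);
      });
    }, 'buffer'))
    .pipe(gulp.dest('./dist/images'));
});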

Related

ReferenceError: data is not defined when writing a piped gulp script

const fs = require('fs');
const inquirer = require('inquirer');
const { Readable } = require('stream');

gulp.task('default', function(done) {
  inquirer.prompt([{
    type: `input`,
    message: `Enter the path`,
    default: `./admin/admin.json`,
    name: `path`
  }]).then(function(answers) {
    console.log(answers.path);
    console.log('answers');
    mydefaultTaskTwo(null, answers.path).pipe(pipedFunction());
    done();
  });
});

function mydefaultTaskTwo(cb, path) {
  let data = '';
  try {
    data = fs.readFileSync(path, 'utf-8');
  } catch (e) {
    console.log(`Error: ${e}`);
  }
  return data;
}

function pipedFunction() {
  let object = JSON.parse(data);
  object['main'] = 'admin';
  data = JSON.stringify(object);
  const readable = Readable.from(data);
  return readable;
}
I understand that src returns a stream, and pipe takes that stream and returns a stream, but how do you feed the stream into the pipedFunction called inside of pipe? I am unsure how it works. I get the following error:
ReferenceError: data is not defined.
Is there something I am misunderstanding about gulp scripts?
Basically, you define data as a local variable inside mydefaultTaskTwo and then try to reach it from a different scope, where it's undefined. So you need to make use of the fact that data is returned, and pass it along, like:
var data = mydefaultTaskTwo(null, answers.path);
pipedFunction(data); // pipedFunction must accept data as a parameter
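A sketch of how pipedFunction could take the data explicitly, reusing the Readable from the question (the process.stdout destination is just an example):
const { Readable } = require('stream');

// Accept the file contents as an argument instead of reading an outer variable.
function pipedFunction(data) {
  const object = JSON.parse(data);
  object['main'] = 'admin';
  return Readable.from(JSON.stringify(object));
}

const data = mydefaultTaskTwo(null, answers.path);
pipedFunction(data).pipe(process.stdout); // pipe to any writable destination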

AWS S3 bucket image upload in Angular 6 having problem in return

I am using this approach to upload images to an AWS S3 bucket:
https://grokonez.com/aws/angular-4-amazon-s3-example-how-to-upload-file-to-s3-bucket
This works fine as an individual task, but I rely on the result, which arrives a bit later due to async behavior. I would like the next task to execute only after the upload is confirmed.
upload() {
  let file: any;
  // let urltype = '';
  let filename = '';
  // let b: boolean;
  for (let i = 0; i < this.urls.length; i++) {
    file = this.selectedFiles[i];
    // urltype = this.urltype[i];
    filename = file.name;
    const k = uuid() + '.' + filename.substr(filename.lastIndexOf('.') + 1);
    this.uploadservice.uploadfile(file, k);
    console.log('done');
    // console.log('file: ' + file + ' : ' + filename);
    // let x = this.userservice.PostImage('test', file);
    // console.log('value of ' + x);
  }
  // return b;
}
fileupload service:
bucket.upload(params, function (err, data) {
  if (err) {
    console.log('There was an error uploading your file: ', err);
    return false;
  }
  console.log('Successfully uploaded file.', data);
  return true;
}).promise();
}
Here, 'done' is logged before the file upload has finished.
I think you should check out a tutorial on asynchronous programming and play around with a couple of examples using simple timeouts to get the hang of it, then proceed to more complex things like S3 and AWS.
Here is how I suggest you start your journey:
1) Learn the basic concepts of asynchronous programming using pure JS
https://eloquentjavascript.net/11_async.html
2) Play around with your own examples using callbacks and timeouts
3) Replace the callbacks with Promises
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises
4) Do it the "angular" way with rxjs Observables (similar to JS Observable)
http://reactivex.io/rxjs/class/es6/Observable.js~Observable.html
PS: To be more concrete:
Your code fails because the following line is executed in an asynchronous manner. The code calls your uploadfile function and immediately continues executing without waiting:
this.uploadservice.uploadfile(file, k);
Once you follow all the points I described above you will be able to do something like this (using a Promise):
this.uploadservice.uploadfile(file, k)
  .then(result => {
    console.log('Upload finished');
  })
  .catch(error => {
    console.log('Something went wrong');
  });
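For that to work, uploadfile has to return the promise. A sketch of what the service method could look like using the AWS SDK's .promise() (the params fields mirror the question; bucketName is an assumed property):
uploadfile(file, key) {
  const params = {
    Bucket: this.bucketName, // assumed; use your bucket name
    Key: key,
    Body: file
  };
  // Return the promise so callers can chain .then()/.catch() on it.
  return bucket.upload(params).promise()
    .then(data => {
      console.log('Successfully uploaded file.', data);
      return true;
    });
}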

Run long running executable with output in gulp [duplicate]

I have this simple script:
var exec = require('child_process').exec;

exec('coffee -cw my_file.coffee', function(error, stdout, stderr) {
  console.log(stdout);
});
where I simply execute a command to compile a CoffeeScript file. But stdout never gets displayed in the console, because the command never ends (because of the -w option of coffee).
If I execute the command directly from the console, I get messages like this:
18:05:59 - compiled my_file.coffee
My question is: is it possible to display these messages with the Node.js exec? If so, how?
Thanks
Don't use exec. Use spawn, whose return value is an EventEmitter. Then you can listen to stdout/stderr events (child.stdout.on('data', callback)) as they happen.
From the Node.js documentation:
var spawn = require('child_process').spawn,
    ls = spawn('ls', ['-lh', '/usr']);

ls.stdout.on('data', function (data) {
  console.log('stdout: ' + data.toString());
});

ls.stderr.on('data', function (data) {
  console.log('stderr: ' + data.toString());
});

ls.on('exit', function (code) {
  console.log('child process exited with code ' + code.toString());
});
exec buffers the output and usually returns it when the command has finished executing.
exec will also return a ChildProcess object that is an EventEmitter.
var exec = require('child_process').exec;
var coffeeProcess = exec('coffee -cw my_file.coffee');

coffeeProcess.stdout.on('data', function(data) {
  console.log(data);
});
OR pipe the child process's stdout to the main stdout.
coffeeProcess.stdout.pipe(process.stdout);
OR inherit stdio using spawn
spawn('coffee', ['-cw', 'my_file.coffee'], { stdio: 'inherit' });
There are already several answers; however, none of them mention the best (and easiest) way to do this: using spawn with the { stdio: 'inherit' } option. It seems to produce the most accurate output, for example when displaying progress information from a git clone.
Simply do this:
var spawn = require('child_process').spawn;
spawn('coffee', ['-cw', 'my_file.coffee'], { stdio: 'inherit' });
Credit to @MorganTouvereyQuilling for pointing this out in this comment.
Inspired by Nathanael Smith's answer and Eric Freese's comment, it could be as simple as:
var exec = require('child_process').exec;
exec('coffee -cw my_file.coffee').stdout.pipe(process.stdout);
I'd just like to add that one small issue with outputting the buffer strings from a spawned process with console.log() is that it adds newlines, which can spread your spawned process output over additional lines. If you output stdout or stderr with process.stdout.write() instead of console.log(), you'll get the console output from the spawned process 'as is', as in the sketch below.
I saw that solution here:
Node.js: printing to console without a trailing newline?
Hope that helps someone using the solution above (which is a great one for live output, even if it is from the documentation).
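A minimal sketch of the difference, assuming a child process obtained from spawn as in the examples above:
// console.log(chunk) appends its own '\n', so chunks that already end with a
// newline produce blank lines; process.stdout.write passes chunks through as-is.
ls.stdout.on('data', function (data) {
  process.stdout.write(data);
});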
I have found it helpful to add a custom exec script to my utilities that does this.
utilities.js
const { exec } = require('child_process');

module.exports.exec = (command) => {
  const process = exec(command);
  process.stdout.on('data', (data) => {
    console.log('stdout: ' + data.toString());
  });
  process.stderr.on('data', (data) => {
    console.log('stderr: ' + data.toString());
  });
  process.on('exit', (code) => {
    console.log('child process exited with code ' + code.toString());
  });
};
app.js
const { exec } = require('./utilities.js')
exec('coffee -cw my_file.coffee')
After reviewing all the other answers, I ended up with this:
function oldSchoolMakeBuild(cb) {
  var makeProcess = exec('make -C ./oldSchoolMakeBuild',
    function (error, stdout, stderr) {
      stderr && console.error(stderr);
      cb(error);
    });
  makeProcess.stdout.on('data', function(data) {
    process.stdout.write('oldSchoolMakeBuild: ' + data);
  });
}
Sometimes data will be multiple lines, so the oldSchoolMakeBuild header will appear once for multiple lines. But this didn't bother me enough to change it.
child_process.spawn returns an object with stdout and stderr streams.
You can tap the stdout stream to read data that the child process sends back to Node. Being a stream, stdout has the "data", "end", and other events that streams have. spawn is best used when you want the child process to return a large amount of data to Node: image processing, reading binary data, etc.
So you can solve your problem using child_process.spawn, as shown below.
var spawn = require('child_process').spawn,
    ls = spawn('coffee', ['-cw', 'my_file.coffee']);

ls.stdout.on('data', function (data) {
  console.log('stdout: ' + data.toString());
});

ls.stderr.on('data', function (data) {
  console.log('stderr: ' + data.toString());
});

ls.on('exit', function (code) {
  console.log('code ' + code.toString());
});
Here is an async helper function written in TypeScript that seems to do the trick for me. I guess this will not work for long-lived processes, but it still might be handy for someone.
import * as child_process from "child_process";

private async spawn(command: string, args: string[]): Promise<{code: number | null, result: string}> {
  return new Promise((resolve, reject) => {
    const spawn = child_process.spawn(command, args);
    let result: string;
    spawn.stdout.on('data', (data: any) => {
      if (result) {
        reject(Error('Helper function does not work for long-lived processes'));
      }
      result = data.toString();
    });
    spawn.stderr.on('data', (error: any) => {
      reject(Error(error.toString()));
    });
    spawn.on('exit', code => {
      resolve({code, result});
    });
  });
}
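Hypothetical usage from an async method on the same class (the command is only an example):
// Run a short-lived command and collect its output once it exits.
async compile() {
  const { code, result } = await this.spawn('coffee', ['-c', 'my_file.coffee']);
  console.log('exited with code ' + code + ': ' + result);
}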

How to use trace.json written by ChromeDriver

I am running a simple node script which starts chromedriver pointed at my website, scrolls to the bottom of the page, and writes the trace to trace.json.
This file is around 30MB.
I can't seem to load this file in chrome://tracing/, which is what I assume I would do in order to view the profile data.
What are my options for making sense of my trace.json file?
Here is my node script, in case that helps clarify what I am up to:
'use strict';

var fs = require('fs');
var wd = require('wd');
var b = wd.promiseRemote('http://localhost:9515');

b.init({
  browserName: 'chrome',
  chromeOptions: {
    perfLoggingPrefs: {
      'traceCategories': 'toplevel,disabled-by-default-devtools.timeline.frame,blink.console,disabled-by-default-devtools.timeline,benchmark'
    },
    args: ['--enable-gpu-benchmarking', '--enable-thread-composting']
  },
  loggingPrefs: {
    performance: 'ALL'
  }
}).then(function () {
  return b.get('http://www.example.com');
}).then(function () {
  // We only want to measure interaction, so getting a log once here
  // flushes any previous tracing logs we have.
  return b.log('performance');
}).then(function () {
  // Smooth scroll to bottom.
  return b.execute(`
    var height = Math.max(document.documentElement.scrollHeight, document.body.scrollHeight, document.documentElement.clientHeight);
    chrome.gpuBenchmarking.smoothScrollBy(height, function () {});
  `);
}).then(function () {
  // Wait for the above action to complete.
  return b.sleep(5000);
}).then(function () {
  // Get all the trace logs since the last time log('performance') was called.
  return b.log('performance');
}).then(function (data) {
  // Write the file to disk.
  return fs.writeFileSync('trace.json', JSON.stringify(data.map(function (s) {
    return JSON.parse(s.message); // This is needed since Selenium outputs logs as strings.
  })));
}).fin(function () {
  return b.quit();
}).done();
Your script doesn't generate the correct format. The required data for each entry is located in message.message.params.
To generate a trace that can be loaded in chrome://tracing:
var fs = require('fs');
var webdriver = require('selenium-webdriver');

var driver = new webdriver.Builder()
  .withCapabilities({
    browserName: 'chrome',
    loggingPrefs: { performance: 'ALL' },
    chromeOptions: {
      args: ['--enable-gpu-benchmarking', '--enable-thread-composting'],
      perfLoggingPrefs: {
        'traceCategories': 'toplevel,disabled-by-default-devtools.timeline.frame,blink.console,disabled-by-default-devtools.timeline,benchmark'
      }
    }
  }).build();

driver.get('https://www.google.com/ncr');
driver.sleep(1000);

// generate a trace file loadable in chrome://tracing
driver.manage().logs().get('performance').then(function (data) {
  fs.writeFileSync('trace.json', JSON.stringify(data.map(function (d) {
    return JSON.parse(d['message'])['message']['params'];
  })));
});

driver.quit();
The same script in Python:
import json, time
from selenium import webdriver

driver = webdriver.Chrome(desired_capabilities = {
    'loggingPrefs': { 'performance': 'ALL' },
    'chromeOptions': {
        "args": ['--enable-gpu-benchmarking', '--enable-thread-composting'],
        "perfLoggingPrefs": {
            "traceCategories": "toplevel,disabled-by-default-devtools.timeline.frame,blink.console,disabled-by-default-devtools.timeline,benchmark"
        }
    }
})

driver.get('https://stackoverflow.com')
time.sleep(1)

# generate a trace file loadable in chrome://tracing
with open(r"trace.json", 'w') as f:
    f.write(json.dumps([json.loads(d['message'])['message']['params'] for d in driver.get_log('performance')]))

driver.quit()
Not sure if you know, but the recommended lib for parsing these traces is https://github.com/ChromeDevTools/devtools-frontend
Also, the recommended categories are __metadata,benchmark,devtools.timeline,rail,toplevel,disabled-by-default-v8.cpu_profiler,disabled-by-default-devtools.timeline,disabled-by-default-devtools.timeline.frame,blink.user_timing,v8.execute,disabled-by-default-devtools.screenshot
It's a very old question, but I hope this helps others.

Gulp: passing parameters to task from watch declaration

The problem: I want to maintain 'collections' of files. This will help with build times and flexibility. For example, every time I edit my app.js file, I don't want to re-compile all my Twitter Bootstrap files.
I can certainly achieve this with 2 tasks and 2 watch declarations; the problem is that the tasks are identical save for the files array. Ideally I would like to pass these through as parameters in the watch declaration. Is there a way to do something like the following pseudo-code?
var files = {
  scripts: [
    'www/assets/scripts/plugins/**/*.js',
    'www/assets/scripts/main.js'
  ],
  vendor: [
    'vendor/jquery/dist/jquery.js',
    'vendor/jqueryui/ui/jquery.ui.widget.js',
    'vendor/holderjs/holder.js'
  ]
};
...
gulp.task('js', ['lint'], function (files, output) {
  return gulp.src(files)
    .pipe(debug())
    .pipe(concat(output))
    .pipe(uglify({outSourceMap: true}))
    .pipe(gulp.dest(targetJSDir))
    .pipe(notify('JS minified'))
    .on('error', gutil.log);
});
...
gulp.watch('scripts/**/*.js', ['lint', 'js'], files.scripts, 'app.min.js');
gulp.watch('vendor/**/*.js', ['lint', 'js'], files.vendor, 'vendor.min.js');
Flipping it around another way: is there a way to namespace the watch declaration that called the task? That way I could check which watch triggered the task, and handle those cases conditionally within the task itself.
the problem is that the tasks are identical save for the files array.
I believe lazypipe (see its GitHub page) is well suited to your wants. This was an interesting problem; I'm going to try to answer both what I think you're asking about (which lazypipe satisfies) and what I think you'd end up wanting once you got past the parameterization-of-pipes issue.
One aspect of what we want is to avoid rerunning jshint on files that haven't changed. Additionally, we want to keep it DRY, and we want to pick up new files in addition to changed ones.
This is tested and works for me:
var gulp = require('gulp');
var $ = require('gulp-load-plugins')();
var es = require('event-stream');
var lazypipe = require('lazypipe');
var gutil = require('gulp-util');
var path = require('path');

var files = {
  scripts: ['src/**/*.js'],
  vendor: ['vendor/**/*.js']
};

// sets up a lazy pipe that does jshint related stuff
function getJsMultiPipe(name) {
  return lazypipe()
    .pipe($.jshint)
    .pipe($.jshint.reporter, 'jshint-stylish')
    // if you don't want to fail on style errors remove/comment this out:
    .pipe($.jshint.reporter, 'fail');
}

// sets up a lazy pipe that does concat and post-concat stuff
function getJsCombinedPipe(groupName, outfile) {
  return lazypipe()
    .pipe($.concat, outfile)
    .pipe($.uglify, {outSourceMap: true})
    .pipe(gulp.dest, 'build')
    .pipe($.notify, {message: groupName + ' JS minified', onLast: true});
}

// sets up a pipe for the initial build task, combining the above two pipes
function getBuildPipe(groupName, outfile) {
  return gulp.src(files[groupName])
    .pipe(getJsMultiPipe(groupName)())
    .pipe(getJsCombinedPipe(groupName, outfile)());
}

// sets up a watch pipe, such that only the changed file is jshinted,
// but all files are included in the concat steps
function setWatchPipe(groupName, outfile) {
  return $.watch({
    glob: files[groupName],
    name: groupName,
    emitOnGlob: false,
    emit: 'one'
  }, function(file, done) {
    return file
      .pipe($.debug({title: 'watch -- changed file'}))
      .pipe(getJsMultiPipe(groupName)())
      // switch context
      .pipe(gulp.src(files[groupName]))
      .pipe($.debug({title: 'watch -- entire group'}))
      .pipe(getJsCombinedPipe(groupName, outfile)())
      .pipe($.debug({title: 'watch -- concatted/source-mapped'}))
      .pipe($.notify({message: 'JS minified', onLast: true}));
  });
}

// task to do an initial full build
gulp.task('build', function() {
  return es.merge(
    getBuildPipe('scripts', 'app.min.js'),
    getBuildPipe('vendor', 'vendor.min.js')
  ).pipe($.notify({message: 'JS minified', onLast: true}));
});

// task to do an initial full build and then set up watches for
// incremental change
gulp.task('watch', ['build'], function(done) {
  setWatchPipe('scripts', 'app.min.js');
  setWatchPipe('vendor', 'vendor.min.js');
  done();
});
My dependencies look like:
"devDependencies": {
"jshint-stylish": "^0.1.5",
"gulp-concat": "^2.2.0",
"gulp-uglify": "^0.2.1",
"gulp-debug": "^0.3.0",
"gulp-notify": "^1.2.5",
"gulp-jshint": "^1.5.3",
"gulp": "^3.6.0",
"gulp-load-plugins": "^0.5.0",
"lazypipe": "^0.2.1",
"event-stream": "^3.1.1",
"gulp-util": "^2.2.14",
"gulp-watch": "^0.5.3"
}
EDIT: I just glanced at this again and I notice these lines:
// switch context
.pipe(gulp.src(files[groupName]))
Be aware that I believe the gulp.src API has changed since I wrote this: it currently doesn't switch the context when you pipe things into gulp.src, so this spot might require a change. For newer versions of gulp, I think what will happen is that you will be adding to the context instead, presumably losing a small bit of efficiency.
You could write a wrapper function for tasks that captures parameters and passes them to the task, e.g. (with the help of the lodash library):
// We capture the options in this object. We use gulp.env as a base such that
// options from the cli are also passed to the task.
var currentOpts = _.clone(gulp.env);

// Here we define a function that wraps a task such that it can receive
// an options object
function parameterized(taskFunc) {
  return function() {
    taskFunc.call(null, currentOpts);
  };
}

// Here we create a function that can be used by gulp.watch to call
// a parameterized task. It can be passed an object of "task": {options} pairs
// and it will return a task function that will capture these options
// before invoking the task.
function withArgs(tasks) {
  return function() {
    _.each(tasks, function (opts, task) {
      currentOpts = _.extend(currentOpts, opts);
      gulp.run(task);
      currentOpts = _.clone(gulp.env);
    });
  };
}
var files = {
  scripts: ["src/**/*.js"],
  vendor: ["vendor/**/*.js"]
};
// We pass the task function to parameterized. This will create a wrapper
// function that will pass an options object to the actual task function
gulp.task("js", parameterized(function(opts) {
  gulp.src(files[opts.target])
    .pipe(concat(opts.output));
}));

gulp.task("watch", function() {
  // The withArgs function creates a watch function that invokes
  // tasks with an options argument.
  // In this case it will invoke the js task with the options object
  // { target: "scripts", output: "scripts.min.js" }
  gulp.watch(files.scripts, withArgs({
    js: {
      target: "scripts",
      output: "scripts.min.js"
    }
  }));
  gulp.watch(files.vendor, withArgs({
    js: {
      target: "vendor",
      output: "vendor.min.js"
    }
  }));
});
I've faced the same problem: how to pass parameters to a gulp task. It's weird that this feature isn't built in; a parameterized task seems like a very DRY solution to such a common need as building, for instance, two versions of a package.
I wanted to make it as simple as possible, so my solution was to dynamically create a task for each possible parameter. It works OK if you have a small number of exactly defined values; it won't work for wide-range values like ints or floats.
The task definition is wrapped in a function that takes the desired parameter, and the parameter is appended to the task's name (with '$' in between for convenience).
Your code could look like this:
function construct_js(myname, files, output) {
  gulp.task('js$' + myname, ['lint'], function () {
    return gulp.src(files)
      .pipe(debug())
      .pipe(concat(output))
      .pipe(uglify({outSourceMap: true}))
      .pipe(gulp.dest(targetJSDir))
      .pipe(notify('JS minified'))
      .on('error', gutil.log);
  });
}

construct_js("app", files.scripts, 'app.min.js');
construct_js("vendor", files.vendor, 'vendor.min.js');

gulp.watch('scripts/**/*.js', ['lint', 'js$app']);
gulp.watch('vendor/**/*.js', ['lint', 'js$vendor']);
Or better, with a little change in the data definition, we can invoke task generation in a loop (so if you add a new "version" to the configs array, it will work right away):
var configs = [
  {
    name: "app",
    output: 'app.min.js',
    files: [
      'www/assets/scripts/plugins/**/*.js',
      'www/assets/scripts/main.js'
    ]
  },
  {
    name: "vendor",
    output: 'vendor.min.js',
    files: [
      'vendor/jquery/dist/jquery.js',
      'vendor/jqueryui/ui/jquery.ui.widget.js',
      'vendor/holderjs/holder.js'
    ]
  }
];

function construct_js(taskConfig) {
  gulp.task('js$' + taskConfig.name, ['lint'], function () {
    return gulp.src(taskConfig.files)
      .pipe(debug())
      .pipe(concat(taskConfig.output))
      .pipe(uglify({outSourceMap: true}))
      .pipe(gulp.dest(targetJSDir))
      .pipe(notify('JS minified'))
      .on('error', gutil.log);
  });
}

for (var i = 0; i < configs.length; i++) {
  construct_js(configs[i]);
}
If we use Underscore, the last for loop becomes:
_(configs).each(construct_js);
I've used this approach in my project with good results.
I'd like to propose some alternatives. Suppose we have a task called build that we would like to conditionally uglify given a certain param.
The two approaches use two watches with a single build task.
Alternative #1:
You can shell out (the code below uses child_process.exec; the gulp-exec plugin is another option) to fire up a task with parameters.
var exec = require('child_process').exec;

gulp.task('build', function() {
  // Parse args here and determine whether to uglify or not
});

gulp.task('buildWithoutUglify', function() {
  exec('gulp build --withoutUglify');
});

gulp.task('watch', function() {
  gulp.watch(someFilePath, ['buildWithoutUglify']);
});
Please note that this approach is a bit slow, since what it does is execute command-line gulp.
Alternative #2:
Set a global variable:
var withUglify = true;

gulp.task('build', function() {
  // Use something like ``gulp-if`` to conditionally uglify.
});

gulp.task('buildWithoutUglify', function() {
  withUglify = false; // flip the flag so 'build' skips uglify
  gulp.start('build');
});

gulp.task('watch', function() {
  gulp.watch(someFilePath, ['buildWithoutUglify']);
});
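For completeness, a minimal sketch of how the build task might honor that flag with gulp-if (the plugin usage and destination path are assumptions, not part of the original answer):
var gulpif = require('gulp-if');
var uglify = require('gulp-uglify');

gulp.task('build', function() {
  return gulp.src(someFilePath)
    // Only run uglify() when the global flag is set.
    .pipe(gulpif(withUglify, uglify()))
    .pipe(gulp.dest('./dist'));
});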