Error while running stubby4node using Gulp - gulp

I am trying to set up Stubby Server in my JavaScript environment and I am getting the error below.
The relevant part of my Gulpfile:
gulp.task('stubby', function(cb) {
    var options = {
        callback: function (server, options) {
            server.get(1, function (err, endpoint) {
                if (!err)
                    console.log(endpoint);
            });
        },
        stubs: 8000,
        tls: 8443,
        admin: 8010,
        files: [
            '*.*'
        ]
    };
    stubby(options, cb);
});
The error:
[12:15:03] Starting 'stubby'...
[12:15:03] 'stubby' errored after 17 ms
[12:15:03] Error: Missing error message
at new PluginError (C:\Users\admin\IdeaProjects\myproject\node_modules\gulp-util\lib\PluginError.js:73:28)
at readJSON (C:\Users\admin\IdeaProjects\myproject\node_modules\gulp-stubby-server\index.js:90:15)
at C:\Users\admin\IdeaProjects\myproject\node_modules\gulp-stubby-server\index.js:149:24
at Array.map (native)
at stubbyPlugin (C:\Users\admin\IdeaProjects\myproject\node_modules\gulp-stubby-server\index.js:136:12)
at Gulp.<anonymous> (C:\Users\admin\IdeaProjects\myproject\gulpfile.js:54:5)
at module.exports (C:\Users\admin\IdeaProjects\myproject\node_modules\orchestrator\lib\runTask.js:34:7)
at Gulp.Orchestrator._runTask (C:\Users\admin\IdeaProjects\myproject\node_modules\orchestrator\index.js:273:3)
at Gulp.Orchestrator._runStep (C:\Users\admin\IdeaProjects\myproject\node_modules\orchestrator\index.js:214:10)
at Gulp.Orchestrator.start (C:\Users\admin\IdeaProjects\myproject\node_modules\orchestrator\index.js:134:8)

Searching the gulp-stubby-server codebase for PluginError yields the following snippet:
function readJSON(filepath, options) {
    var src = fs.readFileSync(filepath, options),
        result;
    if (!options.mute) {
        gutil.log(gutil.colors.yellow('Parsing ' + filepath + '...'));
    }
    try {
        result = JSON.parse(src);
        return result;
    } catch (e) {
        throw new gutil.PluginError(PLUGIN_NAME, 'Unable to parse "' + filepath + '" file (' + e.message + ').', e);
    }
}
— Source on GitHub
You can tell this is the likely culprit from the stack trace, where the PluginError is thrown from readJSON.
The issue
Take note of the catch block. This is caused by one of the files matching your glob (*.*) not being a valid JSON file.
To fix:
Ensure you are using the newest version of gulp-stubby-server
Ensure that you are using the correct glob (that is, do you really mean *.*?); a narrower pattern is sketched after this list
Ensure that all the files matching the glob in the current working directory are valid JSON files
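For example, a minimal sketch of a narrower configuration (the mocks/*.json pattern is a placeholder; point it at wherever your stub definitions actually live):
gulp.task('stubby', function(cb) {
    var options = {
        stubs: 8000,
        tls: 8443,
        admin: 8010,
        // Hypothetical path: only pick up stub definition files, not every file
        files: [
            'mocks/*.json'
        ]
    };
    stubby(options, cb);
});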

Related

Nativescript cannot read JSON file

I have the following JSON:
.../src/app/assets/i18n/en.json
{
    "TEST": "This is a some test data in a json file"
}
I have the following code:
const folderName = "assets/i18n/en.json";
knownFolders.currentApp().getFile(folderName).readText().then(a => console.log("json file: " + JSON.parse(a)));
It gives me the following error:
ERROR Error: Uncaught (in promise): SyntaxError: Unexpected end of JSON input
JS: SyntaxError: Unexpected end of JSON input
JS: at JSON.parse (<anonymous>)
I have tried:
Set folderName to "/assets/i18n/en.json"
Rebuild, and reconnect my testing phone
Use HTTP and with this.http.get("~/app/assets/i18n/en.json").toPromise().then(res => console.log("http???", res)).catch(err => console.log("err:", err));
Print the file contents without parsing them (they're empty...)
But the error stays the same.
Update 1:
It seems that, sadly, the file does not exist.
this code:
const folderName = "assets/i18n";
const fileName = "en.json";
console.log("exists?", File.exists(folderName + "/" + fileName));
returns false.
Yet the file is clearly there, as the picture of the project files shows.
(The code provided is in app.component.ts, in the constructor of AppComponent.)
What can be the problem here?
Update 2:
I updated my webpack.config.js to copy .json files:
new CopyWebpackPlugin([
    { from: { glob: "fonts/**" } },
    { from: { glob: "**/*.jpg" } },
    { from: { glob: "**/*.json" } },
    { from: { glob: "**/*.png" } },
], { ignore: [`${relative(appPath, appResourcesFullPath)}/**`] }),
but still no luck. The file still does not exist...
Update 3:
This is getting ridiculous... the file can be imported as a standard JSON file, but the NativeScript lib still does not see it.
import { File } from "tns-core-modules/file-system";
import config from "./assets/i18n/hu.json";
....
const folderName = "./assets/i18n";
const fileName = "hu.json";
console.log("config:", config.test)
console.log("exists?", File.exists(folderName + "/" + fileName));
this produces the following output:
JS: config: This is a translated line
JS: exists? false
AFAIK, the path must be split; you can't request a file with a relative path directly.
const folderName = "assets/i18n";
const fileName = "en.json";
console.log(
    knownFolders.currentApp().getFolder(folderName).getFile(fileName).readTextSync(),
);
I faced a similar situation while working on an app; the issue was that the file permissions were not granted.
Another way around, which worked perfectly without needing any permission, was to fetch the JSON from a URL and work through it.
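A minimal sketch of that URL-based approach, assuming the file is hosted somewhere reachable (the URL below is a placeholder) and using getJSON from the http module:
import { getJSON } from "tns-core-modules/http";

// Hypothetical URL: host the translation file wherever suits your setup.
getJSON("https://example.com/i18n/en.json")
    .then((json) => console.log("TEST: " + json["TEST"]))
    .catch((err) => console.log("err:", err));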

gulp-protractor error with chrome v54 / web driver v2.25

Due to the latest Chrome update (v54), we've noticed our Protractor tests failing. We attempted to update to the latest version of gulp-protractor (v3.0.0), which in turn downloads the latest WebDriver (v2.25), to resolve the issue, but unfortunately a new error occurs that we've been unable to resolve.
Everything worked fine before chrome's update.
Our protractor configuration is as follows:
exports.config = {
    // Capabilities to be passed to the webdriver instance.
    capabilities: {
        'browserName': 'chrome'
    },
    onPrepare: function () {
        var fs = require('fs');
        var testDir = 'testresults/';
        if (!fs.existsSync(testDir)) {
            fs.mkdirSync(testDir);
        }
        var jasmineReporters = require('jasmine-reporters');
        // returning the promise makes protractor wait for the reporter config before executing tests
        return browser.getProcessedConfig().then(function () {
            // you could use other properties here if you want, such as platform and version
            var browserName = 'browser';
            browser.getCapabilities().then(function (caps) {
                browserName = caps.caps_.browserName.replace(/ /g, "_");
                var junitReporter = new jasmineReporters.JUnitXmlReporter({
                    consolidateAll: true,
                    savePath: testDir,
                    // this will produce distinct xml files for each capability
                    filePrefix: 'test-protractor-' + browserName,
                    modifySuiteName: function (generatedSuiteName) {
                        // this will produce distinct suite names for each capability,
                        // e.g. 'firefox.login tests' and 'chrome.login tests'
                        return 'test-protractor-' + browserName + '.' + generatedSuiteName;
                    }
                });
                jasmine.getEnv().addReporter(junitReporter);
            });
        });
    },
    baseUrl: 'http://localhost:3000',
    // Spec patterns are relative to the current working directory when
    // protractor is called.
    specs: [paths.e2e + '/**/*.js'],
    // Options to be passed to Jasmine-node.
    jasmineNodeOpts: {
        showColors: true,
        defaultTimeoutInterval: 30000
    }
};
The error is:
[13:27:13] E/launcher - Error: Error
at C:\ws\node_modules\protractor\built\util.js:55:37
at _rejected (C:\ws\node_modules\q\q.js:844:24)
at C:\ws\node_modules\q\q.js:870:30
at Promise.when (C:\ws\node_modules\q\q.js:1122:31)
at Promise.promise.promiseDispatch (C:\ws\node_modules\q\q.js:788:41)
at C:\ws\node_modules\q\q.js:604:44
at runSingle (C:\ws\node_modules\q\q.js:137:13)
at flush (C:\ws\node_modules\q\q.js:125:13)
at nextTickCallbackWith0Args (node.js:420:9)
at process._tickCallback (node.js:349:13)
[13:27:13] E/launcher - Process exited with error code 100
onPrepare is evaluated in built/util.js in the runFilenameOrFn_ function. The stack trace is unfortunately not helpful, but what it means is that onPrepare has errors. Looking at your onPrepare method, the error occurs when assigning browserName from the browser capabilities. In your code, caps.caps_ is actually undefined, so caps.caps_.browserName throws an error. The capabilities object should be accessed as follows:
browser.getCapabilities().then(capabilities => {
    let browserName = capabilities.get('browserName').replace(/ /g, "_");
    // ...
});
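One more thing worth noting, as a suggestion rather than the cause of this error: the inner getCapabilities() promise in the question's onPrepare is never returned, so Protractor will not actually wait for the reporter to be registered before running specs. A minimal sketch of the corrected block, reusing the names from the question:
return browser.getProcessedConfig().then(function () {
    // Return the inner promise too, so Protractor waits for the reporter.
    return browser.getCapabilities().then(function (capabilities) {
        var browserName = capabilities.get('browserName').replace(/ /g, "_");
        jasmine.getEnv().addReporter(new jasmineReporters.JUnitXmlReporter({
            consolidateAll: true,
            savePath: testDir,
            filePrefix: 'test-protractor-' + browserName
        }));
    });
});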

Run long running executable with output in gulp [duplicate]

I have this simple script:
var exec = require('child_process').exec;
exec('coffee -cw my_file.coffee', function(error, stdout, stderr) {
    console.log(stdout);
});
where I simply execute a command to compile a CoffeeScript file. But stdout never gets displayed in the console, because the command never ends (due to the -w option of coffee).
If I execute the command directly from the console, I get messages like this:
18:05:59 - compiled my_file.coffee
My question is: is it possible to display these messages with the Node.js exec? If yes, how?
Thanks
Don't use exec. Use spawn, which returns a ChildProcess that is an EventEmitter; you can then listen to stdout/stderr events (spawn.stdout.on('data', callback)) as they happen.
From the Node.js documentation:
var spawn = require('child_process').spawn,
    ls = spawn('ls', ['-lh', '/usr']);

ls.stdout.on('data', function (data) {
    console.log('stdout: ' + data.toString());
});

ls.stderr.on('data', function (data) {
    console.log('stderr: ' + data.toString());
});

ls.on('exit', function (code) {
    console.log('child process exited with code ' + code.toString());
});
exec buffers the output and usually returns it when the command has finished executing.
exec will also return a ChildProcess object that is an EventEmitter.
var exec = require('child_process').exec;
var coffeeProcess = exec('coffee -cw my_file.coffee');
coffeeProcess.stdout.on('data', function(data) {
    console.log(data);
});
OR pipe the child process's stdout to the main stdout.
coffeeProcess.stdout.pipe(process.stdout);
OR inherit stdio using spawn
spawn('coffee', ['-cw', 'my_file.coffee'], { stdio: 'inherit' });
There are already several answers; however, none of them mentions the best (and easiest) way to do this, which is using spawn with the { stdio: 'inherit' } option. It seems to produce the most accurate output, for example when displaying the progress information from a git clone.
Simply do this:
var spawn = require('child_process').spawn;
spawn('coffee', ['-cw', 'my_file.coffee'], { stdio: 'inherit' });
Credit to @MorganTouvereyQuilling for pointing this out in this comment.
Inspired by Nathanael Smith's answer and Eric Freese's comment, it could be as simple as:
var exec = require('child_process').exec;
exec('coffee -cw my_file.coffee').stdout.pipe(process.stdout);
I'd just like to add one small issue with outputting the buffer strings from a spawned process with console.log(): it adds newlines, which can spread your spawned process output over additional lines. If you output stdout or stderr with process.stdout.write() instead of console.log(), you'll get the console output from the spawned process 'as is'.
I saw that solution here:
Node.js: printing to console without a trailing newline?
Hope that helps someone using the solution above (which is a great one for live output, even if it is from the documentation).
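A minimal sketch combining the two ideas, spawn for live output and process.stdout.write to pass chunks through unmodified:
var spawn = require('child_process').spawn;
var coffee = spawn('coffee', ['-cw', 'my_file.coffee']);

// Write chunks as-is; unlike console.log(), no extra newlines are added.
coffee.stdout.on('data', function (data) {
    process.stdout.write(data);
});
coffee.stderr.on('data', function (data) {
    process.stderr.write(data);
});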
I have found it helpful to add a custom exec script to my utilities that does this.
utilities.js
const { exec } = require('child_process')

module.exports.exec = (command) => {
    const process = exec(command)
    process.stdout.on('data', (data) => {
        console.log('stdout: ' + data.toString())
    })
    process.stderr.on('data', (data) => {
        console.log('stderr: ' + data.toString())
    })
    process.on('exit', (code) => {
        console.log('child process exited with code ' + code.toString())
    })
}
app.js
const { exec } = require('./utilities.js')
exec('coffee -cw my_file.coffee')
After reviewing all the other answers, I ended up with this:
var exec = require('child_process').exec;

function oldSchoolMakeBuild(cb) {
    var makeProcess = exec('make -C ./oldSchoolMakeBuild',
        function (error, stdout, stderr) {
            stderr && console.error(stderr);
            cb(error);
        });
    makeProcess.stdout.on('data', function(data) {
        process.stdout.write('oldSchoolMakeBuild: ' + data);
    });
}
Sometimes data will be multiple lines, so the oldSchoolMakeBuild header will appear once for multiple lines. But this didn't bother me enough to change it.
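If it does bother you, here is a small sketch of one way to prefix each line instead, splitting every chunk on newlines (this assumes make's output arrives line-buffered, which it usually does):
makeProcess.stdout.on('data', function (data) {
    data.toString().split('\n').filter(Boolean).forEach(function (line) {
        process.stdout.write('oldSchoolMakeBuild: ' + line + '\n');
    });
});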
child_process.spawn returns an object with stdout and stderr streams.
You can tap the stdout stream to read data that the child process sends back to Node. Being a stream, stdout has the "data", "end", and other events that streams have. spawn is best used when you want the child process to return a large amount of data to Node: image processing, reading binary data, etc.
So you can solve your problem using child_process.spawn as shown below.
var spawn = require('child_process').spawn,
    coffee = spawn('coffee', ['-cw', 'my_file.coffee']);

coffee.stdout.on('data', function (data) {
    console.log('stdout: ' + data.toString());
});

coffee.stderr.on('data', function (data) {
    console.log('stderr: ' + data.toString());
});

coffee.on('exit', function (code) {
    console.log('code ' + code.toString());
});
Here is an async helper function written in TypeScript that seems to do the trick for me. I guess this will not work for long-lived processes, but it still might be handy for someone.
import * as child_process from "child_process";

private async spawn(command: string, args: string[]): Promise<{code: number | null, result: string}> {
    return new Promise((resolve, reject) => {
        const spawn = child_process.spawn(command, args)
        let result: string
        spawn.stdout.on('data', (data: any) => {
            if (result) {
                reject(Error('Helper function does not work for long-lived processes'))
            }
            result = data.toString()
        })
        spawn.stderr.on('data', (error: any) => {
            reject(Error(error.toString()))
        })
        spawn.on('exit', code => {
            resolve({code, result})
        })
    })
}
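Usage might look like this (the command is just an example):
const { code, result } = await this.spawn('coffee', ['-c', 'my_file.coffee']);
console.log('exit code: ' + code + ', output: ' + result);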

How to write and read from csv or json file in meteor

I am using papaParse
and I want to save the result of this package into a file that users can download. What is the best way to do this? I am also using this Node.js code to do it:
var csv = Papa.unparse(Users.find().fetch());
console.log("csv : " + JSON.stringify(csv)); // get csv format (not in file)
fs.writeFile("meteorProject/public/", csv, function(err) {
    if (err) {
        return console.log(err);
    }
    console.log("The file was saved!");
});
but it gives this error:
{ [Error: EISDIR: illegal operation on a directory, open 'meteorProject/public/']
I20160907-13:00:26.970(4.5)? errno: -21,
I20160907-13:00:26.970(4.5)? code: 'EISDIR',
I20160907-13:00:26.970(4.5)? syscall: 'open',
I20160907-13:00:26.970(4.5)? path: 'meteorProject/public/' }
How can I resolve it?
Thanks :-)
This issue is related to the directory: check the directory path you passed, i.e. "meteorProject/public/".
What you need to change is just the path you are using: fs.writeFile expects a file, so include a file name while saving, e.g. meteorProject/public/test.csv.
Or completely change the directory path; I tried the following on my Ubuntu machine and it runs well:
var userArray = Users.find().fetch();
var data = Papa.unparse(userArray);
console.log("data is....");
console.log(data);
fs.writeFile('/home/parveen/test/test.csv', data, function(err, res) {
    if (err) {
        console.log("err while saving");
        console.log(err);
    }
    else {
        console.log("File saved");
        console.log(res);
    }
});
The above code saves the file as test.csv in the test folder.
Please check and let me know if you are still facing any issue.
If you want to understand the error in depth, please see this link:
https://github.com/bekk/grunt-retire/issues/2
Hope this helps!
Thanks
Finally, I used this package to solve the problem: Meteor-Files.
This is sample code:
export() {
    var csv = Papa.unparse(Users.find({}, {fields: {_id: 0}}).fetch());
    Exports.write(csv, {
        fileName: 'backup' + new Date().getTime() + '.csv',
        type: 'text/csv'
    }, function (error, fileRef) {
        if (error) {
            throw error;
        } else {
            console.log(fileRef.name + ' is successfully saved to FS. _id: ' + fileRef._id);
        }
    });
},
Exports is a collection. This is the function that helped me.
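For completeness, a minimal sketch of how such an Exports collection might be declared with Meteor-Files (the collection name and storage path here are assumptions, not from the original answer):
import { FilesCollection } from 'meteor/ostrio:files';

// Hypothetical declaration; adjust collectionName/storagePath to your app.
const Exports = new FilesCollection({
    collectionName: 'exports',
    storagePath: 'exports'
});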

couchdb - Import json file

I have a file with data in the json format. The data is odd in that the rows have varying subsets of a set of properties that are not all known in advance (over time they will build up). For example:
[{"firstName":"Joe","education":"highschool","sex":"male"},
{"lastName":"Edwards","address":"ohio","sex":"male"},
{"favoriteSport":"cycling","bicycle":"raleigh","ownsBoat":"yes","ownsDog":"yes","ownsHouse":"yes"}]
A large amount of data exists already in a file so I would like to import it in to couchdb rather than enter the data item by item. I followed the procedures from a post here but, while a db was created, it was empty. I used:
curl -X PUT -d @../Data/data.json http://127.0.0.1:5984/test_import
UPDATE: Since I'm working with Node.js (newbie), I thought I'd try using 'cradle'. My thought was to import the data as an array, and bulk-load that using cradle's db.save().
var fs = require('fs');
var cradle = require('cradle');

var data = fs.readFile('../Data/data.json', function (err, data) {
    if (err) {
        throw err;
    }
    .
    .
    .
    makeDB(bigdata, 'test_import'); // where 'bigdata' is an array of json objects/couchdb 'documents'
});

function makeDB(p, filename) {
    var db = new(cradle.Connection)().database(filename);
    console.log(db);
    db.save(p, function(err, res) {
        if (err) {
            console.log(err);
        } else {
            console.log('Success!');
        }
    });
};
The latter seems to work! A database is created and filled; it does, however, throw the following errors:
k:\nodejs\node_modules\cradle\lib\cradle.js:198
return callback(body);
^
TypeError: undefined is not a function
OR
k:\nodejs\node_modules\cradle\lib\cradle.js:201
callback(null, self.options.raw ? body : new cradle.Response(body, res
^
TypeError: undefined is not a function
at Request.cradle.Connection.request [as _callback] (k:\nodejs\node_modules\cradle\lib\cradle.
js:201:9)
at Request.init.self.callback (k:\nodejs\node_modules\request\main.js:120:22)
at Request.EventEmitter.emit (events.js:91:17)
at Request.<anonymous> (k:\nodejs\node_modules\request\main.js:633:16)
at Request.EventEmitter.emit (events.js:88:17)
at IncomingMessage.Request.start.self.req.self.httpModule.request.buffer (k:\nodejs\node_modul
es\request\main.js:595:14)
at IncomingMessage.EventEmitter.emit (events.js:115:20)
at IncomingMessage._emitEnd (http.js:366:10)
at HTTPParser.parserOnMessageComplete [as onMessageComplete] (http.js:149:23)
at Socket.socketOnData [as ondata] (http.js:1366:20)
[SOLVED]
The answers to my questions are that yes, of course, CouchDB is perfectly suited to that kind of data. The easiest way I found to do the bulk import with Node.js is using cradle (whose creator provided the solution to the problem). The preceding code works error-free with the following changes to the makeDB function:
// Take the array of objects and create a couchdb database
function makeDB(data, filename) {
    var db = new(cradle.Connection)().database(filename);
    //console.log(db);
    db.create(function(err) {
        if (err) console.log(err);
    });
    db.save(data, function(err) {
        if (err) console.log(err);
        console.log(filename + ' is created.');
    });
};
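For reference, the same bulk import can also be done without cradle by POSTing to CouchDB's _bulk_docs endpoint, which expects the array wrapped in a "docs" property. A sketch, assuming the data has been wrapped into a file bulk.json shaped like {"docs": [ ... ]} and the database already exists (create it first with curl -X PUT http://127.0.0.1:5984/test_import):
curl -X POST -H "Content-Type: application/json" \
    -d @bulk.json http://127.0.0.1:5984/test_import/_bulk_docs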