Loss and mean squared error values not showing in the Training Performance chart. Not getting a predicted line - regression

We are trying to feed Uber data that includes time of day and ride fare into our TensorFlow.js model. When we run the model in the browser, the points show up on our scatterplot, but during training the loss and mean squared error values are not showing up, and most importantly our model is not displaying a prediction line.
var userData = [
{
"City": "San Francisco",
"Product_Type": "UberEATS Marketplace",
"Trip_or_Order_Status": "COMPLETED",
"Request_Time": "2019-06-16 04:10:44 +0000 UTC",
"Begin_Trip_Time": "2019-06-16 04:44:40 +0000 UTC",
"Begin_Trip_Lat": "37.7352602",
"Begin_Trip_Lng": "-122.4203465",
"Begin_Trip_Address": "",
"Dropoff_Time": "2019-06-16 04:44:40 +0000 UTC",
"Dropoff_Lat": "37.7352602",
"Dropoff_Lng": "-122.4203465",
"Dropoff_Address": "",
"Distance_miles": "2.04",
"Fare_Amount": "32.34",
"Fare_Currency": "USD"
}...]
async function getData() {
const carsData = await userData;
// Here we map out the values for each car and filter out the list items that do not have a day or a pay value
const cleaned = carsData.map(car => ({
timeInMinutes: calculateMins(car.Request_Time),
pay_rate: normalizeUberPrice(car.Distance_miles, car.Fare_Amount),
}))
.filter(car => (car.day != null && car.pay != null));
return cleaned;
}
async function run() {
const data = await getData();
const values = data.map(d => ({
x: d.day,
y: d.pay,
}));
tfvis.render.scatterplot(
{ name: 'Horsepower v MPG' },
{ values },
{
xAxisDomain: [0, 1600],
yAxisDomain: [0,10],
xLabel: 'Day',
yLabel: 'Pay',
height: 300
}
);
const model = createModel();
tfvis.show.modelSummary({ name: 'Model Summary' }, model);
// Convert the data to a form we can use for training.
const tensorData = convertToTensor(data);
console.log(tensorData)
const { inputs, labels } = tensorData;
// Train the model
await trainModel(model, inputs, labels);
console.log('Done Training');
testModel(model, data, tensorData);
}
function createModel() {
const model = tf.sequential();
model.add(tf.layers.dense({ inputShape: [1], units: 25, useBias: true }));
model.add(tf.layers.dense({ units: 50, activation: "sigmoid" }));
model.add(tf.layers.dense({ units: 1, useBias: true }));
return model;
}
function convertToTensor(data) {
return tf.tidy(() => {
tf.util.shuffle(data);
const inputs = data.map(d => d.pay)
const labels = data.map(d => d.day);
const inputTensor = tf.tensor2d(inputs, [inputs.length, 1]);
const labelTensor = tf.tensor2d(labels, [labels.length, 1]);
//Step 3. Normalize the data to the range 0 - 1 using min-max scaling
const inputMax = inputTensor.max();
const inputMin = inputTensor.min();
const labelMax = labelTensor.max();
const labelMin = labelTensor.min();
const normalizedInputs = inputTensor.sub(inputMin).div(inputMax.sub(inputMin));
const normalizedLabels = labelTensor.sub(labelMin).div(labelMax.sub(labelMin));
return {
inputs: normalizedInputs,
labels: normalizedLabels,
// Return the min/max bounds so we can use them later.
inputMax,
inputMin,
labelMax,
labelMin,
}
});
}
async function trainModel(model, inputs, labels) {
  model.compile({
    optimizer: tf.train.adam(),
    loss: tf.losses.meanSquaredError,
    metrics: ['mse'],
  });
  const batchSize = 32;
  const epochs = 30;
  return await model.fit(inputs, labels, {
    batchSize,
    epochs,
    shuffle: true,
    callbacks: tfvis.show.fitCallbacks(
      { name: 'Training Performance' },
      ['loss', 'mse'],
      {
        xAxisDomain: [0, 100],
        yAxisDomain: [0, 1],
        height: 200,
        callbacks: ['onEpochEnd'] // ',onBatchEnd'
      }
    )
  });
}
function testModel(model, inputData, normalizationData) {
const { inputMax, inputMin, labelMin, labelMax } = normalizationData;
const [xs, preds] = tf.tidy(() => {
const xs = tf.linspace(0, 1, 100);
const preds = model.predict(xs.reshape([100, 1]));
const unNormXs = xs
.mul(inputMax.sub(inputMin))
.add(inputMin);
const unNormPreds = preds
.mul(labelMax.sub(labelMin))
.add(labelMin);
return [unNormXs.dataSync(), unNormPreds.dataSync()];
});
const predictedPoints = Array.from(xs).map((val, i) => {
return { x: val, y: preds[i] }
});
const originalPoints = inputData.map(d => ({
x: d.pay, y: d.day,
}));
console.log("ORIGINAL POINTS:")
console.log(originalPoints)
tfvis.render.scatterplot(
{ name: 'Model Predictions vs Original Data' },
{ values: [originalPoints, predictedPoints], series: ['original', 'predicted'] },
{
xAxisDomain: [0,10],
yAxisDomain: [0,1600],
xLabel: 'Horsepower',
yLabel: 'MPG',
height: 1000
}
);
}
document.addEventListener('DOMContentLoaded', run);
Basically we want to see a predicted line for our data, but we're not getting anything back.
It worked when we used data like this:
var userData = [{
  day: 1,
  pay: 20
},...]

The data processing is not performed correctly, so the values used for training contain NaN and Infinity. As a result, the loss computed by model.fit is NaN and therefore cannot be displayed on the tfjs-vis chart.
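A quick way to confirm this is to check the tensors for non-finite values before training; a minimal sketch reusing the question's data and convertToTensor (a tensor's data() resolves to a typed array):
const { inputs, labels } = convertToTensor(data);
inputs.data().then(vals => console.log('non-finite inputs?', vals.some(v => !Number.isFinite(v))));
labels.data().then(vals => console.log('non-finite labels?', vals.some(v => !Number.isFinite(v))));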
The filtering
.filter(car => (car.day != null && car.pay != null));
is not removing NaN and Infinity. Instead, this condition can be used:
.filter(car => isFinite(car.pay + car.day) && !isNaN(car.pay + car.day));
Although the NaN and Infinity values actually occur in car.day, the filter checks car.pay and car.day together (hence the added sum) to make sure such values cannot appear anywhere in the cleaned data.
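For reference, a minimal sketch of getData with the stricter filter applied. It also renames the mapped properties to day and pay so that the filter and the charting code (d.day, d.pay) see the fields they expect; calculateMins and normalizeUberPrice are the question's own helpers and are assumed to return plain numbers:
async function getData() {
  const carsData = await userData;
  // map to the property names the rest of the code reads (d.day / d.pay)
  const cleaned = carsData.map(car => ({
    day: calculateMins(car.Request_Time),
    pay: normalizeUberPrice(car.Distance_miles, car.Fare_Amount),
  }))
  // drop every entry containing NaN or Infinity
  .filter(car => isFinite(car.pay + car.day) && !isNaN(car.pay + car.day));
  return cleaned;
}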
Here you can see how to display the loss.

Related

Error when calling multiple animated sprites

Here I created a separate class for each sprite, for example DeviceOne and DeviceTwo. Everything went well when creating and loading only one sprite. However, when I call DeviceTwo and reload the dataVizExtension, I always get the following error.
Uncaught TypeError: Cannot read properties of undefined (reading 'dbId')
This is the code for DeviceOne; the class for DeviceTwo is basically the same. The only difference is the sprite images.
export class DeviceOne {
constructor(viewer, dataVizExtn) {
this.viewer = viewer;
this.dataVizExtn = null;
this.DataVizCore = null;
this.viewableType = null;
this.viewableData = null;
this.baseURL = "http://localhost:3000/assets/images/sprite/";
this.sensorPositions = {
Dasloop: {
x: 10,
y: -3,
z: 20,
},
Warning: {
x: 0,
y: 10,
z: 3,
},
};
this.dasloops = [
"img_gps_dasloop_online.svg",
"img_gps_dasloop_online-1.svg",
"img_gps_dasloop_online-2.svg",
"img_gps_dasloop_online-3.svg",
];
this.warnings = ["ic_warning.svg", "ic_warning-2.svg"];
this.startAnim1 = 0;
this.startAnim2 = 0;
this.startAnim3 = 0;
this.infoChart = new InfoChart(this.viewer, this.options);
}
/**
* @return {ViewableData} resulting viewable data that contains all viewables (icons)
*/
async onSpriteLoadedToScene() {
this.dataVizExtn = await this.viewer.loadExtension(
"Autodesk.DataVisualization"
);
const dataVizCore = Autodesk.DataVisualization.Core;
this.onSpriteHovering = this.onSpriteHovering.bind(this);
this.viewer.addEventListener(
dataVizCore.MOUSE_HOVERING,
this.onSpriteHovering
);
this.onSpriteClicked = this.onSpriteClicked.bind(this);
this.viewer.addEventListener(dataVizCore.MOUSE_CLICK, this.onSpriteClicked);
const viewableType = dataVizCore.ViewableType.SPRITE;
const spriteColor = new THREE.Color(0xffffff);
const highlightedColor = new THREE.Color(0xe0e0ff);
const spriteIconUrl = `${this.baseURL}${"img_gps_dasloop_online.svg"}`;
const dasloopStyles = new dataVizCore.ViewableStyle(
viewableType,
spriteColor,
spriteIconUrl,
highlightedColor,
`${this.baseURL}${this.dasloops[0]}`,
this.dasloops.map((dasloop) => `${this.baseURL}${dasloop}`)
);
const warningStyles = new dataVizCore.ViewableStyle(
viewableType,
spriteColor,
`${this.baseURL}${"ic_warning.svg"}`,
highlightedColor,
`${this.baseURL}${this.warnings[0]}`,
this.warnings.map((warning) => `${this.baseURL}${warning}`)
);
this.viewableData = new dataVizCore.ViewableData();
this.viewableData.spriteSize = 30;
const simulationData = [
{ position: { x: 0, y: 0, z: 10 } },
{ position: { x: 5, y: -3, z: 10 } },
];
const warningData = [{ position: { x: 0, y: 0, z: 0 } }];
simulationData.forEach((myData, index) => {
const dbId = 10 + index;
const position = myData.position;
const viewable = new dataVizCore.SpriteViewable(
position,
dasloopStyles,
dbId
);
this.viewableData.addViewable(viewable);
});
warningData.forEach((myData, index) => {
const dbId = 15 + index;
const position = myData.position;
const viewableWarning = new dataVizCore.SpriteViewable(
position,
warningStyles,
dbId
);
this.viewableData.addViewable(viewableWarning);
});
await this.viewableData.finish();
this.dataVizExtn.addViewables(this.viewableData);
this.spriteToUpdate = this.dataVizExtn.viewableData.viewables.map(
(sprite) => sprite.dbId
);
this.animate = setInterval(this.getAnimateSprite.bind(this), 500);
}
getAnimateSprite() {
this.dataVizExtn.invalidateViewables(this.spriteToUpdate, (viewable) => {
switch (viewable.dbId) {
case 10:
return {
url: `${this.baseURL}${
this.dasloops[this.startAnim1++ % this.dasloops.length]
}`,
};
case 15:
return {
url: `${this.baseURL}${
this.warnings[this.startAnim2++ % this.warnings.length]
}`,
};
case 11:
return {
url: `${this.baseURL}${
this.dasloops[this.startAnim3++ % this.dasloops.length]
}`,
};
default:
break;
}
});
}
}
Sorry, it's a little hard for me to tell where the error comes from with the above code snippet alone, but you mentioned reloading dataVizExtension. So I would advise you to check your this.animate = setInterval(this.getAnimateSprite.bind(this), 500);.
Did you clear the interval with clearInterval(this.animate) while unloading and before reloading your extension?
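Something along these lines, as a sketch; the dispose name and where it gets called from are assumptions, not part of the original code:
dispose() {
  if (this.animate) {
    clearInterval(this.animate); // stop the sprite animation timer
    this.animate = null;
  }
  // also detach the listeners added in onSpriteLoadedToScene
  const dataVizCore = Autodesk.DataVisualization.Core;
  this.viewer.removeEventListener(dataVizCore.MOUSE_HOVERING, this.onSpriteHovering);
  this.viewer.removeEventListener(dataVizCore.MOUSE_CLICK, this.onSpriteClicked);
}
Calling this before reloading the extension ensures the old timer no longer fires getAnimateSprite against viewables that no longer exist.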

How to get xyz coordinates of forge-viewer onClick event?

I am following this link (https://stackblitz.com/edit/angular-forge-viewer-pjyarf?file=app%2Fapp.component.ts) to get the xyz coordinates of the viewer's onclick event in Angular. I can't get the viewer properly. Please help me find a solution.
ngOnInit() {
this.mainFunction();
}
mainFunction(){
this.viewerOptions = {
initializerOptions: {
env: 'AutodeskProduction',
getAccessToken: (
onGetAccessToken: (token: string, expire: number) => void
) => {
const expireTimeSeconds = 60 * 30;
onGetAccessToken(ACCESS_TOKEN, expireTimeSeconds);
},
api: 'derivativeV2',
enableMemoryManagement: true,
},
onViewerInitialized: (args: ViewerInitializedEvent) => {
args.viewerComponent.DocumentId = DOCUMENT_URN;
this.viewer=args.viewer;
},
};
}
selectionChanged(e){
const state = this.viewer.viewerState.getState({ viewport: true });
const globalOffset = this.viewer.model.myData.globalOffset
const currentPosition = new THREE.Vector3().fromArray( state.viewport.eye );
const originPosition = currentPosition.clone().add( globalOffset );
console.log("hiiihhaaaaaa:",originPosition);
}
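Note that selectionChanged above derives its position from the camera viewport state (the eye position), not from the click itself. To get the world xyz of the point actually clicked, a common approach is viewer.clientToWorld; a sketch, assuming this.viewer is the initialized viewer instance:
this.viewer.container.addEventListener('click', (event) => {
  const rect = this.viewer.container.getBoundingClientRect();
  const x = event.clientX - rect.left;
  const y = event.clientY - rect.top;
  const hit = this.viewer.clientToWorld(x, y, true); // true = ignore transparent objects
  if (hit) {
    console.log('clicked world position:', hit.point); // a THREE.Vector3
  }
});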

Getting usable numbers from nested array JSON

The data I'm given is structured like the following, and I'm struggling to get any usable data out of it using reduce or map.
const data = [
{
id: 25,
status: 1,
description: "No Description",
length: 4,
data: [
{
id: 43,
comment: "Comment1",
eventTimestamp: 1541027189000,
intensity: 29
},
{
comment: "Comment2",
eventTimestamp: 1541027191000,
intensity: 33
},
{
id: 45,
comment: "Comment3",
eventTimestamp: 1541027193000,
intensity: 30
}
],
tTypes: [
{
id: 3,
label: "Johnny",
certainty: "TEST",
comment: "Test Purposes Only",
icon: "bottle",
number: 0
}
]
}
];
I've tried flattening, and I've tried iterating over the JSON twice, and I just seem to end up with either NaN or undefined. I'd like to be able to order the entries by time (using the timestamp), get the min/max/average from the intensity values, and more. I have that figured out for the length, which is a level higher, but I just can't seem to figure out the rest. Can someone point me in the right direction?
export default function App() {
let tTypesArray = data.map((a) => a.tTypes);
let Walker = tTypesArray.reduce((a, tTypes) => tTypes.label === "Johnny" ? ++a : a, 0);
console.log(Walker);
console.log(tTypesArray[0].label);
console.log([].concat(...data)
.map(data => data.tTypes.number)
.reduce((a, b) => a + b))
console.log([].concat(...data).reduce((a, { tTypes: { id }}) => id, 0))
return <div className="App">ARG!</div>;
}
Are some of the examples I've tried.
https://codesandbox.io/s/purple-cache-ivz1y?file=/src/App.js
Is the link to the sandbox.
What I understood from your question is that you need to loop over data, and for each element you want to extract values and do some calculations.
First you need to loop over your data input. I will use Array.forEach:
data.forEach(element => { ... })
Now that we have a loop, we can access each element's properties and extract the information we want. For instance, let's say you want to sort the comments by timestamp in ascending order:
const sortedComments = element.data.sort((a, b) => a.eventTimestamp - b.eventTimestamp);
console.log(sortedComments)
Now let's say you want the min, max, and average intensity from the comments. There are several ways to get it. Here is an algorithm for that:
let min = Infinity;
let max = -Infinity;
let sum = 0;
for (const comment of sortedComments) {
if(comment.intensity < min) {
min = comment.intensity;
}
if(comment.intensity > max) {
max = comment.intensity;
}
sum += comment.intensity;
}
const avg = sum / sortedComments.length;
console.log({min, max, avg})
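As an aside, the same numbers can be computed more compactly with map and the Math helpers. This replaces the loop above; note that spreading an empty array into Math.min would return Infinity, so it assumes sortedComments is non-empty:
const intensities = sortedComments.map(c => c.intensity);
const min = Math.min(...intensities);
const max = Math.max(...intensities);
const avg = intensities.reduce((sum, v) => sum + v, 0) / intensities.length;
console.log({ min, max, avg });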
Putting it all together:
const data = [
{
id: 25,
status: 1,
description: "No Description",
length: 4,
data: [
{
id: 43,
comment: "Comment1",
eventTimestamp: 1541027189000,
intensity: 29
},
{
comment: "Comment2",
eventTimestamp: 1541027191000,
intensity: 33
},
{
id: 45,
comment: "Comment3",
eventTimestamp: 1541027193000,
intensity: 30
}
],
tTypes: [
{
id: 3,
label: "Johnny",
certainty: "TEST",
comment: "Test Purposes Only",
icon: "bottle",
number: 0
}
]
}
];
data.forEach(element => {
const sortedComments = element.data.sort((a, b) => a.eventTimestamp - b.eventTimestamp);
console.log(sortedComments);
let min = Infinity;
let max = -Infinity;
let sum = 0;
for (const comment of sortedComments) {
if(comment.intensity < min) {
min = comment.intensity;
}
if(comment.intensity > max) {
max = comment.intensity;
}
sum += comment.intensity;
}
const avg = sum / sortedComments.length;
console.log({min, max, avg});
let walker = element.tTypes.reduce(
(a, tType) => (tType.label === "Johnny" ? ++a : a), 0
);
console.log(walker)
});
I hope this points you in the right direction.

ProducerStream producing only to single partition

I am trying to produce messages to a single topic that has 2 partitions, but all the messages go only to partition 2.
I would expect a producer stream to distribute the messages across all partitions.
const kafka = require('kafka-node')
const { Transform } = require('stream');
const _ = require('lodash');
const client = new kafka.KafkaClient({ kafkaHost: 'localhost:9092' })
, streamproducer = new kafka.ProducerStream({kafkaClient: client});
const stdinTransform = new Transform({
objectMode: true,
decodeStrings: true,
transform (text, encoding, callback) {
let num = parseInt(text);
let message = { num: num, method: 'two' }
console.log('pushing message')
callback(null, {
topic: 'topic356',
messages: JSON.stringify(message)
});
}
});
stdinTransform.pipe(streamproducer);
function send() {
var message = new Date().toString();
stdinTransform.write([{ messages: [message] }]);
}
setInterval(send, 100);
ConsumerGroup:
var ConsumerGroup = require('kafka-node').ConsumerGroup;
var consumerOptions = {
kafkaHost: '127.0.0.1:9092',
groupId: 'ExampleTestGroup',
sessionTimeout: 15000,
protocol: ['roundrobin'],
fromOffset: 'latest' // equivalent of auto.offset.reset valid values are 'none', 'latest', 'earliest'
};
var topics = 'topic356';
var consumerGroup = new ConsumerGroup(Object.assign({ id: 'consumer1' }, consumerOptions), topics);
consumerGroup.on('data', onMessage);
var consumerGroup2 = new ConsumerGroup(Object.assign({ id: 'consumer2' }, consumerOptions), topics);
consumerGroup2.on('data', onMessage);
consumerGroup2.on('connect', function () {
setTimeout(function () {
consumerGroup2.close(true, function (error) {
console.log('consumer2 closed', error);
});
}, 25000);
});
function onMessage (message) {
console.log(
` partition: ${message.partition} `
);
}
Do you produce messages with a key? In Kafka, messages with the same key are published to the same partition.
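For example, with kafka-node's keyed partitioner (partitionerType: 3), the key of a KeyedMessage decides the target partition. A minimal sketch (the topic and key values are illustrative):
const kafka = require('kafka-node');
const client = new kafka.KafkaClient({ kafkaHost: 'localhost:9092' });
const producer = new kafka.Producer(client, { partitionerType: 3 });
const km = new kafka.KeyedMessage('user-42', 'some message');
producer.on('ready', () => {
  producer.send([{ topic: 'topic356', messages: [km] }], (err, result) => {
    console.log(err || result);
  });
});
Messages with the same key always land on the same partition, while different keys spread across partitions.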
Use partitionerType in the producer options. The available types are default = 0, random = 1, cyclic = 2, keyed = 3, and custom = 4; the default (0) does not distribute messages across partitions.
new kafka.Producer(new kafka.KafkaClient({ kafkaHost: 'localhost:9092' }),{
partitionerType:1
});
https://github.com/SOHU-Co/kafka-node/issues/1094
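Since the question uses ProducerStream rather than Producer, the equivalent appears to be passing producer options through the ProducerStream constructor. A sketch, assuming the option keys documented in the kafka-node README (kafkaClient and producer take option objects, so worth verifying against your version):
const kafka = require('kafka-node');
const streamproducer = new kafka.ProducerStream({
  kafkaClient: { kafkaHost: 'localhost:9092' },
  producer: { partitionerType: 2 } // 2 = cyclic, i.e. round-robin across partitions
});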

Transform Request to Autoquery friendly

We are working with a third-party grid (Telerik Kendo) that has paging/sorting/filtering built in. It sends its GET requests in a particular format, and I'm trying to determine if there is a way to translate these requests into AutoQuery-friendly requests.
Query string params
Sort Pattern:
sort[{0}][field] and sort[{0}][dir]
Filtering:
filter[filters][{0}][field]
filter[filters][{0}][operator]
filter[filters][{0}][value]
So this, which is populated in the query string:
filter[filters][0][field]
filter[filters][0][operator]
filter[filters][0][value]
would need to be translated to:
FieldName=1 // filter[filters][0][field]+filter[filters][0][operator]+filter[filters][0][value] in a nutshell (not exactly true)
Should I manipulate the query string object in a plugin by removing the filters (or just adding the ones I need)? Is there a better option here?
I'm not sure there is a clean way to do this on the Kendo side either.
I will explain the two routes I went down; I hope to see a better answer.
First, I tried to modify the query string in a request filter, but could not. I ended up having to run the AutoQuery queries manually by getting the params and modifying them before calling AutoQuery.Execute. Something like this:
var requestparams = Request.ToAutoQueryParams();
var q = AutoQueryDb.CreateQuery(requestobject, requestparams);
AutoQueryDb.Execute(requestobject, q);
I wish there was a more global way to do this. The extension method just loops over all the querystring params and adds the ones that I need.
After doing the above work, I wasn't very happy with the result so I investigated doing it differently and ended up with the following:
Register the Kendo grid filter operations against their equivalent ServiceStack AutoQuery conventions:
var aq = new AutoQueryFeature { MaxLimit = 100, EnableAutoQueryViewer=true };
aq.ImplicitConventions.Add("%neq", aq.ImplicitConventions["%NotEqualTo"]);
aq.ImplicitConventions.Add("%eq", "{Field} = {Value}");
Next, on the grid's read operation, we need to reformat the query string:
read: {
url: "/api/stuff?format=json&isGrid=true",
data: function (options) {
if (options.sort && options.sort.length > 0) {
options.OrderBy = (options.sort[0].dir == "desc" ? "-" : "") + options.sort[0].field;
}
if (options.filter && options.filter.filters.length > 0) {
for (var i = 0; i < options.filter.filters.length; i++) {
var f = options.filter.filters[i];
console.log(f);
options[f.field + f.operator] = f.value;
}
}
}
}
Now the grid will send the operations in an AutoQuery-friendly manner.
I created an AutoQueryDataSource TypeScript class that you may or may not find useful.
Its usage is along the lines of:
this.gridDataSource = AutoQueryKendoDataSource.getDefaultInstance<dtos.QueryDbSubclass, dtos.ListDefinition>('/api/autoQueryRoute', { orderByDesc: 'createdOn' });
export default class AutoQueryKendoDataSource<queryT extends dtos.QueryDb_1<T>, T> extends kendo.data.DataSource {
private constructor(options: kendo.data.DataSourceOptions = {}, public route?: string, public request?: queryT) {
super(options)
}
defer: ng.IDeferred<any>;
static exportToExcel(columns: kendo.ui.GridColumn[], dataSource: kendo.data.DataSource, filename: string) {
let rows = [{ cells: columns.map(d => { return { value: d.field }; }) }];
dataSource.fetch(function () {
var data = this.data();
for (var i = 0; i < data.length; i++) {
//push single row for every record
rows.push({
cells: _.map(columns, d => { return { value: data[i][d.field] } })
})
}
var workbook = new kendo.ooxml.Workbook({
sheets: [
{
columns: _.map(columns, d => { return { autoWidth: true } }),
// Title of the sheet
title: filename,
// Rows of the sheet
rows: rows
}
]
});
//save the file as Excel file with extension xlsx
kendo.saveAs({ dataURI: workbook.toDataURL(), fileName: filename });
})
}
static getDefaultInstance<queryT extends dtos.QueryDb_1<T>, T>(route: string, request: queryT, $q?: ng.IQService, model?: any) {
let sortInfo: {
orderBy?: string,
orderByDesc?: string,
skip?: number
} = {
};
let opts = {
transport: {
read: {
url: route,
dataType: 'json',
data: request
},
parameterMap: (data, type) => {
if (type == 'read') {
if (data.sort) {
data.sort.forEach((s: any) => {
if (s.field.indexOf('.') > -1) {
var arr = _.split(s.field, '.')
s.field = arr[arr.length - 1];
}
})
}//for autoquery to work, need only field names not entity names.
sortInfo = {
orderByDesc: _.join(_.map(_.filter(data.sort, (s: any) => s.dir == 'desc'), 'field'), ','),
orderBy: _.join(_.map(_.filter(data.sort, (s: any) => s.dir == 'asc'), 'field'), ','),
skip: 0
}
if (data.page)
sortInfo.skip = (data.page - 1) * data.pageSize;
_.extend(data, request);
//override sorting if done via grid
if (sortInfo.orderByDesc) {
(<any>data).orderByDesc = sortInfo.orderByDesc;
(<any>data).orderBy = null;
}
if (sortInfo.orderBy) {
(<any>data).orderBy = sortInfo.orderBy;
(<any>data).orderByDesc = null;
}
(<any>data).skip = sortInfo.skip;
return data;
}
return data;
},
},
requestStart: (e: kendo.data.DataSourceRequestStartEvent) => {
let ds = <AutoQueryKendoDataSource<queryT, T>>e.sender;
if ($q)
ds.defer = $q.defer();
},
requestEnd: (e: kendo.data.DataSourceRequestEndEvent) => {
new DatesToStringsService().convert(e.response);
let ds = <AutoQueryKendoDataSource<queryT, T>>e.sender;
if (ds.defer)
ds.defer.resolve();
},
schema: {
data: (response: dtos.QueryResponse<T>) => {
return response.results;
},
type: 'json',
total: 'total',
model: model
},
pageSize: request.take || 40,
page: 1,
serverPaging: true,
serverSorting: true
}
let ds = new AutoQueryKendoDataSource<queryT, T>(opts, route, request);
return ds;
}
}