I'm already using snapToPoint so that connections are only possible on the constraints of a vertex. However currently I can connect multiple edges to the same connection point. Is there a built-in way to allow only one connection per connection point?
If not, and since I'm new to mxGraph, is there any recommendation on where to put the code in order to get the desired behaviour, e.g. listening to mxEvent.CELL_CONNECTED or mxEvent.CONNECT_CELL? Or do I have to override/reuse a predefined method like mxGraph.cellConnected?
I had the same issue, wanting to limit the number of connections to a connection constraint.
I ended up removing connection constraints that already have an edge connected to them.
Update (10-02-2021): I found a better way:
graph.getAllConnectionConstraints = function (terminal) {
if (terminal != null && this.model.isVertex(terminal.cell)) {
// connection points: North, East, South, West
var allConnectionConstraints = [
new mxConnectionConstraint(new mxPoint(0.5, 0), true),
new mxConnectionConstraint(new mxPoint(1, 0.5), true),
new mxConnectionConstraint(new mxPoint(0.5, 1), true),
new mxConnectionConstraint(new mxPoint(0, 0.5), true)
];
let result = allConnectionConstraints;
// Remove the ones that have an edge connected to them
if (terminal.cell.edges) {
terminal.cell.edges.forEach((edge) => {
const edgeState = this.view.getState(edge);
const source = edge.source.id === terminal.cell.id;
const edgeConnectionConstraint = graph.getConnectionConstraint(
edgeState,
terminal,
source
);
// edgeConnectionConstraint does not include name property
const itemToDelete = result.find(
(item) =>
item.dx === edgeConnectionConstraint.dx &&
item.dy === edgeConnectionConstraint.dy &&
item.point.equals(edgeConnectionConstraint.point)
);
if (itemToDelete) {
result = result.filter((x) => x !== itemToDelete);
}
});
}
return result;
}
return null;
};
https://codesandbox.io/s/mxgraph-react-example-forked-uwru4
Below is the original post:
var graph = this.graph;
this.graph.getAllConnectionConstraints = function (terminal) {
if (terminal != null && this.model.isVertex(terminal.cell)) {
// connection points: North, East, South, West
var allConnectionConstraints = [new mxConnectionConstraint(new mxPoint(.5, 0), true),
new mxConnectionConstraint(new mxPoint(1, .5), true),
new mxConnectionConstraint(new mxPoint(.5, 1), true),
new mxConnectionConstraint(new mxPoint(0, .5), true)];
var result = [];
// loop through all connection constraints
allConnectionConstraints.forEach(connectionConstraint => {
var add = true;
// see if an edge is already connected to this constraint
if (terminal && terminal.cell && terminal.cell.edges) {
terminal.cell.edges.forEach(edge => {
var edgeStyle = graph.getCellStyle(edge);
var edgeX = -1;
var edgeY = -1;
// is this edge coming in or going out?
if (edge.source.id === terminal.cell.id) {
// going out
edgeX = edgeStyle.exitX;
edgeY = edgeStyle.exitY;
} else if (edge.target.id === terminal.cell.id) {
// coming in
edgeX = edgeStyle.entryX;
edgeY = edgeStyle.entryY;
}
if (connectionConstraint.point.x === edgeX &&
connectionConstraint.point.y === edgeY) {
// already a connection to this connectionConstraint, do not add to result
add = false;
}
});
}
if (add) {
result.push(connectionConstraint);
}
});
// return all connectionConstraints for this terminal that do not already have a connection
return result;
}
return null;
};
https://codesandbox.io/s/mxgraph-react-example-4ox9f
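To answer the second part of the question: yes, you can also hook mxEvent.CELL_CONNECTED instead of (or in addition to) filtering the offered constraints. Below is an untested sketch of that idea; it assumes a graph instance named graph and simply removes the new edge again if its fixed point on the terminal is already used by another edge. Depending on when the exit/entry style is applied in your setup, you may need to defer the check until after the current model update.
graph.addListener(mxEvent.CELL_CONNECTED, function (sender, evt) {
  var edge = evt.getProperty('edge');
  var terminal = evt.getProperty('terminal');
  var isSource = evt.getProperty('source');
  if (edge == null || terminal == null || terminal.edges == null) {
    return;
  }
  var edgeState = graph.view.getState(edge);
  var terminalState = graph.view.getState(terminal);
  var constraint = graph.getConnectionConstraint(edgeState, terminalState, isSource);
  if (constraint == null || constraint.point == null) {
    return; // the edge is not attached to a fixed point
  }
  // is any other edge on this terminal already using the same fixed point?
  var taken = terminal.edges.some(function (other) {
    if (other === edge) {
      return false;
    }
    var otherState = graph.view.getState(other);
    if (otherState == null) {
      return false;
    }
    var otherConstraint = graph.getConnectionConstraint(
      otherState, terminalState, other.source === terminal);
    return otherConstraint != null && otherConstraint.point != null &&
      otherConstraint.point.equals(constraint.point);
  });
  if (taken) {
    graph.removeCells([edge]); // reject the second connection to this point
  }
});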
Related
I'm trying to implement the XLS Extension. In the ModelData class, I cannot get the objects' leaf nodes because the viewer is undefined.
Here is the problematic method:
getAllLeafComponents(callback) {
// from https://learnforge.autodesk.io/#/viewer/extensions/panel?id=enumerate-leaf-nodes
viewer.getObjectTree(function (tree) {
let leaves = [];
tree.enumNodeChildren(tree.getRootId(), function (dbId) {
if (tree.getChildCount(dbId) === 0) {
leaves.push(dbId);
}
}, true);
callback(leaves);
});
}
I'm getting Cannot read properties of undefined (reading 'getObjectTree'), meaning viewer is undefined.
However, the viewer is working and displaying documents.
I tried calling it via window.viewer and this.viewer, to no avail.
Thanks in advance for any help.
It looks like the original code missed two lines: getAllLeafComponents needs to read the viewer from this._viewer rather than relying on a global viewer. Could you try the revised version below?
// Model data in format for charts
class ModelData {
constructor(viewer) {
this._modelData = {};
this._viewer = viewer;
}
init(callback) {
var _this = this;
var viewer = _this._viewer;
_this.getAllLeafComponents(function (dbIds) {
var count = dbIds.length;
dbIds.forEach(function (dbId) {
viewer.getProperties(dbId, function (props) {
props.properties.forEach(function (prop) {
if (!isNaN(prop.displayValue)) return; // let's not categorize properties that store numbers
// some adjustments for revit:
prop.displayValue = prop.displayValue.replace('Revit ', ''); // remove this Revit prefix
if (prop.displayValue.indexOf('<') == 0) return; // skip categories that start with <
// ok, now let's organize the data into this hash table
if (_this._modelData[prop.displayName] == null) _this._modelData[prop.displayName] = {};
if (_this._modelData[prop.displayName][prop.displayValue] == null) _this._modelData[prop.displayName][prop.displayValue] = [];
_this._modelData[prop.displayName][prop.displayValue].push(dbId);
})
if ((--count) == 0) callback();
});
})
})
}
getAllLeafComponents(callback) {
var _this = this;
var viewer = _this._viewer;
// from https://learnforge.autodesk.io/#/viewer/extensions/panel?id=enumerate-leaf-nodes
viewer.getObjectTree(function (tree) {
var leaves = [];
tree.enumNodeChildren(tree.getRootId(), function (dbId) {
if (tree.getChildCount(dbId) === 0) {
leaves.push(dbId);
}
}, true);
callback(leaves);
});
}
hasProperty(propertyName){
return (this._modelData[propertyName] !== undefined);
}
getLabels(propertyName) {
return Object.keys(this._modelData[propertyName]);
}
getCountInstances(propertyName) {
return Object.keys(this._modelData[propertyName]).map(key => this._modelData[propertyName][key].length);
}
getIds(propertyName, propertyValue) {
return this._modelData[propertyName][propertyValue];
}
}
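For completeness, here is a minimal sketch of how the class could be wired up so that the viewer reference actually reaches the constructor. The extension class name, the registration call, and the 'Category' property are illustrative assumptions, not part of the original code:
// Hypothetical wiring; adapt to your own extension class.
class XLSExtension extends Autodesk.Viewing.Extension {
  load() {
    // this.viewer is provided by the Extension base class
    this._modelData = new ModelData(this.viewer);
    this._modelData.init(() => {
      // example query once the property data has been collected
      console.log('Has Category property?', this._modelData.hasProperty('Category'));
    });
    return true;
  }
  unload() {
    return true;
  }
}
Autodesk.Viewing.theExtensionManager.registerExtension('XLSExtension', XLSExtension);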
Starting from the logged-in user's data, I would like to display only that user's sub-diagram.
function showLocalOnFullClick() {
var node = myDiagram.selection.first();
if (node !== null) {
myDiagram.scrollToRect(node.actualBounds);
var model = new go.TreeModel();
var nearby = node.findTreeParts(); // three levels of the (sub)tree
nearby.each(function(n) {
if (n instanceof go.Node) model.addNodeData(n.data);
});
myDiagram.model = model;
var selectedLocal = myDiagram.findPartForKey(node.data.key);
if (selectedLocal !== null) selectedLocal.isSelected = true;
}
}
Is there any other way than var node = myDiagram.selection.first();?
I want to get the node data from a search or from a key, e.g. using one of the following (see the sketch after this list):
findNodesByExample
findNodeForData
findNodeDataForKey
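Those lookups do exist on the diagram and its model. Here is an untested sketch that starts from a key instead of the selection; loginUserKey is a placeholder for whatever key your login data provides:
function showLocalForKey(loginUserKey) {
  // look the Node up by its key instead of using the current selection
  var node = myDiagram.findNodeForKey(loginUserKey);
  // alternatively, for the plain data object:
  // var data = myDiagram.model.findNodeDataForKey(loginUserKey);
  if (node !== null) {
    myDiagram.select(node);   // make it the only selected part...
    showLocalOnFullClick();   // ...then reuse the existing function unchanged
  }
}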
I am doing logging for my application using winston. I have set up the file transport like this:
class LoggerHelper extends BaseHelper {
constructor(_cApp) {
super(_cApp);
this._props = {};
}
initialize() {
this._prepareConfigs();
this._createTransportObj();
this._createLoggerObj();
}
_prepareConfigs() {
this._props.dirname = this._configs.logsFolder;
this._props.filename = this._configs.filenameConvention;
this._props.datePattern = this._configs.datePattern;
this._props.maxSize = this._configs.maxSize;
this._props.level = this._configs.level;
this._props.timestamp = this._configs.timestamp;
this._props.prettyPrint = this._configs.prettyPrint;
}
_createTransportObj() {
var DailyRotateFile = winston.transports.DailyRotateFile;
this._transport = new DailyRotateFile(this._props);
}
_createLoggerObj() {
this._logger = winston.createLogger({
transports: [this._transport],
exitOnError: false
});
}
_log(type, error, description, stage, vars) {
var logMsg = {};
var msg = '';
var fileIndex = 3;
if(this._isError(error)) {
var err = error;
msg = error.message;
fileIndex = 1;
} else {
var err = new Error();
msg = error;
}
var caller_line = err.stack.split("at ")[fileIndex];
var index = caller_line.indexOf("(");
var lastIndex = caller_line.lastIndexOf(")");
index = caller_line.slice(index + 1, lastIndex);
var line = index.match(/:[0-9]+:/).toLocaleString();
line = line.replace(/[^0-9]/g, '');
var curTime = new FE.utils.date();
var timestamp = curTime.format('YYYY-MM-DD HH:MM:SS');
logMsg.level = type || 'info';
logMsg.time = timestamp || '';
logMsg.msg = msg || '';
logMsg.desc = description || '';
logMsg.stg = stage || '000';
logMsg.file = index || 'Not Found';
logMsg.stack = err.stack || 'Not Found';
logMsg.line = line || 'Not Found';
var logStr = JSON.stringify(logMsg);
this._logger.log(type, logMsg);
}
info(error, description, stage, vars) {
return this._log('info', error, description, stage, vars);
}
error(error, description, stage, vars) {
return this._log('error', error, description, stage, vars);
}
warn(error, description, stage, vars) {
return this._log('warn', error, description, stage, vars);
}
verbose(error, description, stage, vars) {
return this._log('verbose', error, description, stage, vars);
}
debug(error, description, stage, vars) {
return this._log('debug', error, description, stage, vars);
}
silly(error, description, stage, vars) {
return this._log('silly', error, description, stage, vars);
}
/**
* Checks if value is an Error or Error-like object
* @static
* @param {Any} val Value to test
* @return {Boolean} Whether the value is an Error or Error-like object
*/
_isError(val) {
return !!val && typeof val === 'object' && (
val instanceof Error || (
val.hasOwnProperty('message') && val.hasOwnProperty('stack')
)
);
}
}
module.exports = LoggerHelper;
Now I want to store my logs in a MySQL database table as well. I did come across a winston plugin for MongoDB, but I don't see any support for storing logs in MySQL. Is there a way I can achieve this?
Thanks in advance.
I was facing the same issue recently. After doing some research I found the package 'winston-sql-transport', which saves logs into MySQL.
See this:
const winston = require('winston');
const { SQLTransport } = require('./../lib/winston-sql-transport');

// createLogger matches the winston 3.x API already used in the question
const logger = winston.createLogger({
  transports: [
    new SQLTransport({
      tableName: 'winston_logs',
    })
  ]
});
module.exports = logger;
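Assuming the transport is also configured with your MySQL connection details as described in the package's documentation (those options are omitted above), the exported logger is then used like any other winston logger. The require path below is just an example:
// somewhere else in the application
const logger = require('./logger');

logger.info('user signed in', { userId: 42 });        // stored as a row in winston_logs
logger.error('payment failed', { orderId: 'A-1' });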
Is it common practice to use polymer build and then deploy the application on the web server you use for production?
Or does it make sense to actually use polymer serve / polyserve as the web server?
The problem with polymer serve is that if it falls over it doesn't restart, leaving you with no web site. Its real use is in development because it maps directories for you when you are developing a single element.
Also, how will you be handling ajax calls?
In the past I have run my code (a bespoke Node web server) in PM2. These days I run it using Docker, and in particular docker-compose, which also restarts the application if it fails.
EDIT: The following is how I transpile on the fly. The code is copied (and then altered by me) from the Google Polymer team's "Polymer Server" and is therefore subject to the licence conditions given in that project:
* Copyright (c) 2016 The Polymer Project Authors. All rights reserved.
* This code may only be used under the BSD style license found at
* http://polymer.github.io/LICENSE.txt
* The complete set of authors may be found at
* http://polymer.github.io/AUTHORS.txt
* The complete set of contributors may be found at
* http://polymer.github.io/CONTRIBUTORS.txt
* Code distributed by Google as part of the polymer project is also
* subject to an additional IP rights grant found at
* http://polymer.github.io/PATENTS.txt
The code consists of some supporting functions like these:
const parse5 = require('parse5');
const dom5 = require('dom5');
const LRU = require('lru-cache');
const babelCore = require('babel-core');
const transformLog = require('debug')('web:transform');
const babelTransformers = [
'babel-plugin-transform-es2015-arrow-functions',
'babel-plugin-transform-es2015-block-scoped-functions',
'babel-plugin-transform-es2015-block-scoping',
'babel-plugin-transform-es2015-classes',
'babel-plugin-transform-es2015-computed-properties',
'babel-plugin-transform-es2015-destructuring',
'babel-plugin-transform-es2015-duplicate-keys',
'babel-plugin-transform-es2015-for-of',
'babel-plugin-transform-es2015-function-name',
'babel-plugin-transform-es2015-literals',
'babel-plugin-transform-es2015-object-super',
'babel-plugin-transform-es2015-parameters',
'babel-plugin-transform-es2015-shorthand-properties',
'babel-plugin-transform-es2015-spread',
'babel-plugin-transform-es2015-sticky-regex',
'babel-plugin-transform-es2015-template-literals',
'babel-plugin-transform-es2015-typeof-symbol',
'babel-plugin-transform-es2015-unicode-regex',
'babel-plugin-transform-regenerator',
].map((name) => require(name));
const isInlineJavaScript = dom5.predicates.AND(
dom5.predicates.hasTagName('script'),
dom5.predicates.NOT(dom5.predicates.hasAttr('src')));
const babelCompileCache = LRU({
length: (n, key) => n.length + key.length
});
function compileHtml(source, location) {
const document = parse5.parse(source);
const scriptTags = dom5.queryAll(document, isInlineJavaScript);
for (const scriptTag of scriptTags) {
try {
const script = dom5.getTextContent(scriptTag);
const compiledScriptResult = compileScript(script);
dom5.setTextContent(scriptTag, compiledScriptResult);
} catch (e) {
// By not setting textContent we keep the original script, which
// might work. We may want to fail the request so a better error
// shows up in the network panel of dev tools. If this is the main
// page we could also render a message in the browser.
//eslint-disable-next-line no-console
console.warn(`Error compiling script in ${location}: ${e.message}`);
}
}
return parse5.serialize(document);
}
function compileScript(script) {
return babelCore
.transform(script, {
plugins: babelTransformers,
}).code;
}
function transform(request, body, isHtml) {
const source = body;
const cached = babelCompileCache.get(source);
if (cached !== undefined) {
transformLog('using the cache');
return cached;
}
if (isHtml) {
transformLog('compiling html');
body = compileHtml(source, request.path);
} else {
transformLog('compiling js');
body = compileScript(source);
}
babelCompileCache.set(source, body);
return body;
}
The meat, though, is the middleware, which effectively inserts itself into the outgoing stream, captures all the chunks of outgoing HTML and JS files, and transforms them if necessary.
function transformResponse(transformNeeded) {
return (req, res, next) => {
let ended = false;
let _shouldTransform = null;
let isHtml = true;
// Note: this function memoizes its result.
function shouldTransform() {
if (_shouldTransform == null) {
const successful = res.statusCode >= 200 && res.statusCode < 300;
if (successful) {
const result = transformNeeded(req);
isHtml = result.isHtml;
_shouldTransform = !!result.transform;
} else {
_shouldTransform = false;
}
}
return _shouldTransform;
}
const chunks = [];
const _write = res.write;
res.write = function( chunk, enc, cb) {
if (ended) {
_write.call(this, chunk, enc, cb);
return false;
}
if (shouldTransform()) {
const buffer = (typeof chunk === 'string') ? new Buffer(chunk,enc) : chunk;
chunks.push(buffer);
return true;
}
return _write.call(this, chunk, enc, cb);
}.bind(res);
const _end = res.end;
res.end = function (chunk, enc, cb) {
if (ended)
return false;
ended = true;
if (shouldTransform()) {
if (chunk) {
const buffer = (typeof chunk === 'string') ? new Buffer(chunk,enc) : chunk;
chunks.push(buffer);
}
const body = Buffer.concat(chunks).toString('utf8');
let newBody = body;
try {
newBody = transform(req, body, isHtml);
} catch (e) {
//eslint-disable-next-line no-console
console.warn('Error', e);
}
// TODO(justinfagnani): re-enable setting of content-length when we know
// why it was causing truncated files. Could be multi-byte characters.
// Assumes single-byte code points!
// res.setHeader('Content-Length', `${newBody.length}`);
this.removeHeader('Content-Length');
return _end.call(this, newBody);
}
return _end.call(this,chunk, enc, cb);
}.bind(res);
next();
};
}
This routine calls transformNeeded, which is as follows (this is the bit that detects the browser):
const url = require('url');
const UAParser = require('ua-parser-js');            // user-agent parsing
const requestLog = require('debug')('web:request');  // debug namespace is an assumption; adjust to your own
function transformNeeded(req) {
const pathname = url.parse(req.url).pathname;
const isHtml = pathname === '/' || pathname.slice(-5) === '.html';
if (isHtml || pathname.slice(-3) === '.js') {
//see if we need to compile as we have a .html or .js file
const splitPathName = pathname.split('/');
const isPolyfill = splitPathName.includes('webcomponentsjs') ||
splitPathName.includes('promise-polyfill');
if (!isPolyfill) {
const browser = new UAParser(req.headers['user-agent']).getBrowser();
const versionSplit = (browser.version || '').split('.');
const [majorVersion, minorVersion] = versionSplit.map((v) => v ? parseInt(v, 10) : -1);
const supportsES2015 = (browser.name === 'Chrome' && majorVersion >= 49) ||
(browser.name === 'Chromium' && majorVersion >= 49) ||
(browser.name === 'OPR' && majorVersion >= 36) ||
(browser.name === 'Mobile Safari' && majorVersion >= 10) ||
(browser.name === 'Safari' && majorVersion >= 10) ||
// Note: The Edge user agent uses the EdgeHTML version, not the main
// release version (e.g. EdgeHTML 15 corresponds to Edge 40). See
// https://en.wikipedia.org/wiki/Microsoft_Edge#Release_history.
//
// Versions before 15.15063 may contain a JIT bug affecting ES6
// constructors (see #161).
(browser.name === 'Edge' &&
(majorVersion > 15 || (majorVersion === 15 && minorVersion >= 15063))) ||
(browser.name === 'Firefox' && majorVersion >= 51);
requestLog(
'Browser is %s version %d,%d - supports ES2015? ',
browser.name,
majorVersion,
minorVersion,
supportsES2015
);
return {transform: !supportsES2015, isHtml: isHtml};
}
}
return {transform: false, isHtml: isHtml};
}
Finally, I have to set up the routes before I establish the web server and then tell the web server to use the routes I have set up.
const Router = require('router');
const router = Router();
// sets up my API routes
manager.setRoutes(router);
router.use('/', transformResponse(this.transformNeeded));
router.use('/', staticFiles(clientPath)); // staticFiles is my static-file middleware (e.g. serve-static)
this._start(router);
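_start is my own helper that actually creates the HTTP server. For context, here is a minimal sketch of what it might look like with a plain Node server and the finalhandler package; the names and port are assumptions, not my production code:
// Hypothetical sketch of _start; adapt to your own server setup.
const http = require('http');
const finalhandler = require('finalhandler');

function _start(router) {
  const server = http.createServer((req, res) => {
    // every request goes through the router; finalhandler deals with 404s and errors
    router(req, res, finalhandler(req, res));
  });
  server.listen(process.env.PORT || 8080);
  return server;
}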
In order to try to get around the odd issue I am having with CORS (here), I am attempting to reload any images loaded via canvas.loadFromJSON().
But I am experiencing weird issues: sometimes only one image is replaced, other times I get duplicates of one image.
Here is my code:
canvas.loadFromJSON(<?php echo json_encode($objects); ?>, function() {
var objArray = canvas.getObjects();
for (var i = 0; i < objArray.length; i++) {
canvas.setActiveObject(objArray[i]);
var activeObject = canvas.getActiveObject();
if(activeObject.type === 'image') {
fabric.util.loadImage(activeObject.src, function(img) {
var object = new fabric.Image(img);
object.hasControls = true;
object.lockUniScaling = true;
object.scaleX = activeObject.scaleX;
object.scaleY = activeObject.scaleY;
object.originX = activeObject.originX;
object.originY = activeObject.originY;
object.centeredRotation = true;
object.centeredScaling = true;
canvas.add(object);
}, null, {crossOrigin: 'Anonymous'});
canvas.remove(activeObject);
}
activeObject.setCoords();
}
canvas.deactivateAll();
canvas.renderAll();
canvas.calcOffset();
});
Any ideas why I'm getting these weird issues?
At first glance I don't see anything wrong with your code... but I'm also thinking the code might be a bit inefficient. Is there a need to create a new image instance?
I believe you should be able to just set the crossOrigin property on the image object.
This code is untested, but I'd try something like this:
canvas.loadFromJSON(<?php echo json_encode($objects); ?>, function() {
var objArray = canvas.getObjects();
for (var i = 0; i < objArray.length; i++) {
canvas.setActiveObject(objArray[i]);
var activeObject = canvas.getActiveObject();
if(activeObject.type === 'image') {
activeObject.crossOrigin = 'Anonymous';
}
}
canvas.deactivateAll();
canvas.renderAll();
canvas.calcOffset();
});
I had the same problem and overcame it by downloading the image again and reassigning it to object._element once each fabric object was created by loadFromJSON.
export const getImage = url => {
return new Promise((resolve, reject) => {
let img = new Image();
img.onload = () => resolve(img);
img.onerror = reject;
img.setAttribute('crossOrigin', 'anonymous');
img.src = url;
});
}
canvas.loadFromJSON(json, canvas.renderAll.bind(canvas), async (o, object) => {
if (object.type === "image") {
let imagecore = await getImage(object.src);
object._element = imagecore;
}
});