BackstopJS site loading issue not solved by delay: what is wrong?

When I try to run BackstopJS on certain sites (they must all have some dynamic rendering behaviour in common, though I don't know what), the screenshots generated by BackstopJS only include the first piece of content, centered in the screen. Here's the URL to a screenshot: https://user-images.githubusercontent.com/41495147/63806833-1612b680-c8e2-11e9-9932-680864b470b7.png
I've already tried setting the delay to 5 seconds. I've tried waiting until the footer class is available before taking the screenshot. No dice. What is going on? Here's my config file:
"id": "backstop_default",
"viewports": [
{
"label": "phone",
"width": 320,
"height": 480
},
{
"label": "tablet",
"width": 1024,
"height": 768
},
{
"label": "desktop",
"width": 1280,
"height": 1024
}
],
"onBeforeScript": "puppet/onBefore.js",
"onReadyScript": "puppet/onReady.js",
"scenarios": [
{
"label": "VMLYR Home",
"cookiePath": "backstop_data/engine_scripts/cookies.json",
"url": "https://www.vmlyr.com",
"referenceUrl": "",
"readyEvent": "",
"readySelector": ".region-footer",
"delay": 5000,
"hideSelectors": [],
"removeSelectors": [],
"hoverSelector": "",
"clickSelector": "",
"postInteractionWait": 0,
"selectors": [],
"selectorExpansion": true,
"expect": 0,
"misMatchThreshold" : 0.1,
"requireSameDimensions": true
}
],
"paths": {
"bitmaps_reference": "backstop_data/bitmaps_reference",
"bitmaps_test": "backstop_data/bitmaps_test",
"engine_scripts": "backstop_data/engine_scripts",
"html_report": "backstop_data/html_report",
"ci_report": "backstop_data/ci_report"
},
"report": ["browser"],
"engine": "puppeteer",
"engineOptions": {
"args": ["--no-sandbox"]
},
"asyncCaptureLimit": 5,
"asyncCompareLimit": 50,
"debug": false,
"debugWindow": false
}
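One pattern worth ruling out here: if the site only renders content as it scrolls into view (lazy loading or scroll-triggered animations), neither a fixed delay nor a footer readySelector will make that content appear, because the page is never scrolled during capture. Below is a minimal sketch of a custom backstop_data/engine_scripts/puppet/onReady.js (the path the config above already points at) that scrolls through the page before the screenshot; the step size and timings are assumptions to tune per site.

// backstop_data/engine_scripts/puppet/onReady.js (sketch, Puppeteer engine)
module.exports = async (page, scenario, vp) => {
  // Scroll through the whole page so lazy-loaded / scroll-triggered content renders,
  // then return to the top before BackstopJS captures the screenshot.
  await page.evaluate(async () => {
    await new Promise((resolve) => {
      let scrolled = 0;
      const step = 200;       // pixels per tick (assumption)
      const timer = setInterval(() => {
        window.scrollBy(0, step);
        scrolled += step;
        if (scrolled >= document.body.scrollHeight) {
          clearInterval(timer);
          window.scrollTo(0, 0);
          resolve();
        }
      }, 100);                // ms between ticks (assumption)
    });
  });
  // Give scroll-triggered animations a moment to settle (value is a guess).
  await new Promise((resolve) => setTimeout(resolve, 1000));
};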

Related

How to transcode MP4 video with SRT subtitle on AWS Elemental MediaConvert

I have an MP4 video with SRT captions and I need to transcode them with MediaConvert. In MediaConvert I set automatic ABR and I specified the SRT origin path.
So far, I have tested the following:
I set the SRT file in one output and the video/audio in another
I set the SRT, video and audio in the same output
For the first test, the job finishes successfully, but there isn't any .srt file in the S3 bucket. For the second test, the job fails with the message "Caption destination type [SRT] requires a raw muxer".
This is my JSON for the first test:
{
"Queue": "arn:aws:mediaconvert:us-east-1:{{ACCOUNT-NUMBER}}:queues/Default",
"UserMetadata": {},
"Role": "arn:aws:iam::{{ACCOUNT-NUMBER}}:role/{{MY-ROLE-NAME}}",
"Settings": {
"TimecodeConfig": {
"Source": "ZEROBASED"
},
"OutputGroups": [
{
"Name": "DASH ISO",
"Outputs": [
{
"ContainerSettings": {
"Container": "MPD"
},
"VideoDescription": {
"ScalingBehavior": "DEFAULT",
"TimecodeInsertion": "DISABLED",
"AntiAlias": "ENABLED",
"Sharpness": 50,
"CodecSettings": {
"Codec": "H_264",
"H264Settings": {
"InterlaceMode": "PROGRESSIVE",
"ScanTypeConversionMode": "INTERLACED",
"NumberReferenceFrames": 3,
"Syntax": "DEFAULT",
"Softness": 0,
"GopClosedCadence": 1,
"GopSize": 90,
"Slices": 1,
"GopBReference": "DISABLED",
"SlowPal": "DISABLED",
"EntropyEncoding": "CABAC",
"FramerateControl": "INITIALIZE_FROM_SOURCE",
"RateControlMode": "QVBR",
"CodecProfile": "MAIN",
"Telecine": "NONE",
"MinIInterval": 0,
"AdaptiveQuantization": "AUTO",
"CodecLevel": "AUTO",
"FieldEncoding": "PAFF",
"SceneChangeDetect": "ENABLED",
"QualityTuningLevel": "MULTI_PASS_HQ",
"FramerateConversionAlgorithm": "DUPLICATE_DROP",
"UnregisteredSeiTimecode": "DISABLED",
"GopSizeUnits": "FRAMES",
"ParControl": "INITIALIZE_FROM_SOURCE",
"NumberBFramesBetweenReferenceFrames": 2,
"RepeatPps": "DISABLED",
"DynamicSubGop": "STATIC"
}
},
"AfdSignaling": "NONE",
"DropFrameTimecode": "ENABLED",
"RespondToAfd": "NONE",
"ColorMetadata": "INSERT"
},
"AudioDescriptions": [
{
"AudioTypeControl": "FOLLOW_INPUT",
"AudioSourceName": "Audio Selector 1",
"CodecSettings": {
"Codec": "AAC",
"AacSettings": {
"AudioDescriptionBroadcasterMix": "NORMAL",
"Bitrate": 96000,
"RateControlMode": "CBR",
"CodecProfile": "LC",
"CodingMode": "CODING_MODE_2_0",
"RawFormat": "NONE",
"SampleRate": 48000,
"Specification": "MPEG4"
}
},
"StreamName": "latino",
"LanguageCodeControl": "FOLLOW_INPUT",
"LanguageCode": "SPA"
}
]
},
{
"ContainerSettings": {
"Container": "MPD"
},
"CaptionDescriptions": [
{
"CaptionSelectorName": "Captions Selector 1",
"DestinationSettings": {
"DestinationType": "SRT"
},
"LanguageCode": "SPA",
"LanguageDescription": "latino"
}
]
}
],
"OutputGroupSettings": {
"Type": "DASH_ISO_GROUP_SETTINGS",
"DashIsoGroupSettings": {
"SegmentLength": 30,
"MinFinalSegmentLength": 0,
"Destination": "s3://{{BUCKET-NAME}}/streaming15/dash-iso/",
"FragmentLength": 2,
"SegmentControl": "SINGLE_FILE",
"MpdProfile": "ON_DEMAND_PROFILE",
"HbbtvCompliance": "NONE"
}
},
"AutomatedEncodingSettings": {
"AbrSettings": {
"MaxAbrBitrate": 8000000,
"MinAbrBitrate": 600000
}
}
}
],
"AdAvailOffset": 0,
"Inputs": [
{
"AudioSelectors": {
"Audio Selector 1": {
"Offset": 0,
"DefaultSelection": "DEFAULT",
"ProgramSelection": 1
}
},
"VideoSelector": {
"ColorSpace": "FOLLOW",
"Rotate": "DEGREE_0",
"AlphaBehavior": "DISCARD"
},
"FilterEnable": "AUTO",
"PsiControl": "USE_PSI",
"FilterStrength": 0,
"DeblockFilter": "DISABLED",
"DenoiseFilter": "DISABLED",
"InputScanType": "AUTO",
"TimecodeSource": "ZEROBASED",
"CaptionSelectors": {
"Captions Selector 1": {
"SourceSettings": {
"SourceType": "SRT",
"FileSourceSettings": {
"SourceFile": "s3://{{BUCKET-NAME}}/PROMO_CAP_01.srt"
}
}
}
},
"FileInput": "s3://{{BUCKET-NAME}}/PROMO_CAP_01.mp4"
}
]
},
"AccelerationSettings": {
"Mode": "DISABLED"
},
"StatusUpdateInterval": "SECONDS_60",
"Priority": 0
}
What am I missing?
According to the AWS Elemental MediaConvert user guide, SRT is not a supported output for a DASH-ISO output group when the input caption type is SRT.
Here's a link to that guide (reference page 176):
https://docs.aws.amazon.com/mediaconvert/latest/ug/mediaconvert-guide.pdf
The supported caption outputs for SRT input in DASH-ISO are:
Burn in
IMSC (as sidecar .fmp4)
IMSC (as sidecar .xml)
TTML (as sidecar .fmp4)
TTML (as sidecar .ttml)
Additionally, there is a gap in the documentation. SRT->DASH-ISO+WebVTT is supported, even though it is not listed. The documentation will be corrected, but I wanted to share that with you in case it helps.
If you must send SRT to the output destination, then you could create a separate output group where the caption is in a track with no container (see pages 192-196 in the document).
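As a rough sketch of that last suggestion, the JSON below is an additional entry for the OutputGroups array next to the DASH-ISO group: a File group whose only output carries the caption in a raw ("no container") track. The field values are illustrative and reuse the selector and destination placeholders from the job above; adjust them to your setup.

{
  "Name": "File Group - SRT sidecar",
  "OutputGroupSettings": {
    "Type": "FILE_GROUP_SETTINGS",
    "FileGroupSettings": {
      "Destination": "s3://{{BUCKET-NAME}}/streaming15/captions/"
    }
  },
  "Outputs": [
    {
      "ContainerSettings": {
        "Container": "RAW"
      },
      "CaptionDescriptions": [
        {
          "CaptionSelectorName": "Captions Selector 1",
          "DestinationSettings": {
            "DestinationType": "SRT"
          },
          "LanguageCode": "SPA",
          "LanguageDescription": "latino"
        }
      ]
    }
  ]
}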

Loading pushpin in the forge viewer does not respect the viewerState

We are using the "Autodesk.BIM360.Extension.PushPin" extension inside the forge viewer to enable push pins.
When a push pin has been added to the model, we serialize the pushpin data and store it in our database. An example of such a pushpin is here:
{
"id": "12",
"label": "12",
"status": "quality_issues-not_approved",
"position": {
"x": 15.324803588519861,
"y": -10.150864635427533,
"z": -5.532972775562976
},
"type": "issues",
"objectId": 24518,
"externalId": "d9a1e318-14d0-4d08-b7ab-6d1c331454c2-002793d1",
"viewerState": {
"seedURN": "dXJuOmFkc2sub2JqZWN0czpvcy5vYmplY3Q6MDQyY2QwMmUtNzU0Yi00ZDY2LTgyYTMtNjBmYjFlOWVjMjcxL2U5ODAxZTA4LTUwZjQtNDc0ZS05ZWU4LTAxYWQ0ZGM0ODFiYl9WMV9Lb25nZXN0aWVuKzMwKy0rVGlsYnlnbmluZystK0clMjVDMyUyNUE2bGRlbmRlK2QuKzA1LjA2LnJ2dA",
"objectSet": [{
"id": [],
"isolated": [],
"hidden": [],
"explodeScale": 0,
"idType": "lmv"
}],
"viewport": {
"name": "",
"eye": ["-15.17842530349136", "-0.9048862425583284", "0.6506974303790392"],
"target": ["-22.06049144652811", "0.915848677106827", "-0.4205110420886964"],
"up": [-0.14385076361076257, 0.038057482024001874, 0.9888673247056924],
"worldUpVector": [0, 0, 1],
"pivotPoint": ["-22.510046835506888", "1.6223793651751013", "3.668585646439837"],
"distanceToOrbit": 7.198985875545766,
"aspectRatio": 1.491792224702381,
"projection": "orthographic",
"isOrthographic": true,
"orthographicHeight": 7.198985875545767
},
"renderOptions": {
"environment": "Boardwalk",
"ambientOcclusion": {
"enabled": true,
"radius": 13.123359580052492,
"intensity": 1
},
"toneMap": {
"method": 1,
"exposure": -7,
"lightMultiplier": -1e-20
},
"appearance": {
"ghostHidden": true,
"ambientShadow": true,
"antiAliasing": true,
"progressiveDisplay": true,
"swapBlackAndWhite": false,
"displayLines": true,
"displayPoints": true
}
},
"cutplanes": [],
"globalOffset": {
"x": -20.808594999999997,
"y": 6.686511499999999,
"z": 8.456207
}
},
"objectData": {
"guid": "6de5f80c-73da-30ae-b2d1-8a78f177c2a4",
"urn": "dXJuOmFkc2sub2JqZWN0czpvcy5vYmplY3Q6MDQyY2QwMmUtNzU0Yi00ZDY2LTgyYTMtNjBmYjFlOWVjMjcxL2U5ODAxZTA4LTUwZjQtNDc0ZS05ZWU4LTAxYWQ0ZGM0ODFiYl9WMV9Lb25nZXN0aWVuKzMwKy0rVGlsYnlnbmluZystK0clMjVDMyUyNUE2bGRlbmRlK2QuKzA1LjA2LnJ2dA",
"viewableId": "aaff5911-e8b1-4ae2-b41c-4284d0703eb4-00150218",
"viewName": "{3D}"
}
}
We then load the pushpin into the model again at a later point (when the user reopens the model), like this:
pushPinExtension.loadItems([pushPinItem]);
The result is that the pushpin is added in the model at the correct place, but the viewer state is incorrect. It seems like the viewer state for the pushpin is set to the viewer state of the model at the time when we load the pushpin - and not to the viewer state stored inside the pushpin.
Is this expected behaviour? And if so, how do I use the viewer state from the pushpin instead?
Why not explicitly restore the viewer state stored in the pushpin separately, after loading the pushpin:
pushPinExtension.loadItems([pushPinItem]);
viewer.restoreState(pushPinItem.viewerState)
EDIT:
Try restoring the viewer state when an item is clicked: subscribe to the pushpin click event and, inside the handler, call
viewer.restoreState(pushPinItem.viewerState)
//...
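A slightly fuller sketch of that wiring, under the assumption that the extension exposes a pushPinManager emitting a 'pushpin.selected' event (verify the exact event name and payload against your viewer/extension version); storedPushPinItems is a hypothetical array holding the serialized items loaded from your database:

// Sketch only: the event name and the shape of the event payload are assumptions.
pushPinExtension.pushPinManager.addEventListener('pushpin.selected', function (event) {
  var selected = event.value; // assumed: the pushpin item that was clicked
  var stored = storedPushPinItems.find(function (item) {
    return item.id === selected.id;
  });
  if (stored && stored.viewerState) {
    // Restore the camera/render state saved with the pushpin, instead of keeping
    // whatever state the viewer happened to be in when the item was loaded.
    viewer.restoreState(stored.viewerState);
  }
});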

Issue while creating a job for AWS Elemental MediaConvert

I am having an issue while creating a job for AWS Elemental MediaConvert.
I followed this sequence:
1.) Create a new job
2.) Add input and configurations
3.) Add File output group and configure destination settings
4.) Under Output change Container to No Container
5.) Under Output remove Audio
6.) Under Output -> Video change Codec to JPEG to Frame Capture
7.) Configure the frame rate (the rate at which captures will be produced)
8.) Configure max capture settings
I got the following error:
Job_contains_the_following_error:
/outputGroups: Should not match the schema
Here is my job JSON:
{
"Settings": {
"AdAvailOffset": 0,
"Inputs": [
{
"FilterEnable": "AUTO",
"PsiControl": "USE_PSI",
"FilterStrength": 0,
"DeblockFilter": "DISABLED",
"DenoiseFilter": "DISABLED",
"TimecodeSource": "EMBEDDED",
"VideoSelector": {
"ColorSpace": "FOLLOW",
"Rotate": "DEGREE_0"
},
"AudioSelectors": {
"Audio Selector 1": {
"Offset": 0,
"DefaultSelection": "DEFAULT",
"ProgramSelection": 1
}
},
"FileInput": "s3://field-live-user-data/udariyan.mp4"
}
],
"OutputGroups": [
{
"Name": "File Group",
"OutputGroupSettings": {
"Type": "FILE_GROUP_SETTINGS",
"FileGroupSettings": {
"Destination": "s3://field-live-user-data/"
}
},
"Outputs": [
{
"VideoDescription": {
"ScalingBehavior": "DEFAULT",
"TimecodeInsertion": "DISABLED",
"AntiAlias": "ENABLED",
"Sharpness": 50,
"CodecSettings": {
"Codec": "FRAME_CAPTURE",
"FrameCaptureSettings": {
"FramerateNumerator": 30,
"FramerateDenominator": 100,
"MaxCaptures": 2,
"Quality": 80
}
},
"DropFrameTimecode": "ENABLED",
"ColorMetadata": "INSERT",
"Width": 1280,
"Height": 720
},
"ContainerSettings": {
"Container": "RAW"
},
"Extension": "jpg"
}
],
"CustomName": "customGroup"
}
]
},
"Queue": "arn:aws:mediaconvert:us-east-1:469030323850:queues/Default",
"Role": "arn:aws:iam::469030323850:role/myMediaConverter"
}
Currently, you can't have a job template with frame capture only; see the AWS Developer Forums thread on this topic.

AmCharts does not show the 2nd Y axis (JSON data)

I have the following JSON data (extract) that I use to draw a dual Y-axis chart with AmCharts:
[{"Day":"24-05 10H","Production":"0.82431267","USD":"482.02837415988"},{"Day":"24-05 11H","Production":"0.83045272","USD":"485.61885435808"},{"Day":"24-05 12H","Production":"0.83441691","USD":"487.93696995924"},{"Day":"24-05 01H","Production":"0.84323421","USD":"493.09300957644"},{"Day":"24-05 02H","Production":"0.85096095","USD":"497.61132896580006"},{"Day":"24-05 03H","Production":"0.85694953","USD":"501.11323496092"},{"Day":"24-05 04H","Production":"0.868104","USD":"507.635967456"},{"Day":"24-05 06H","Production":"0.8802085","USD":"519.796567173"},{"Day":"24-05 07H","Production":"0.8913847","USD":"532.3438566870001"},{"Day":"24-05 08H","Production":"0.89426695","USD":"530.7322322868499"},{"Day":"24-05 09H","Production":"0.89904346","USD":"531.08385173466"},{"Day":"24-05 10H","Production":"0.90740126","USD":"535.88759172324"},{"Day":"24-05 11H","Production":"0.91944257","USD":"554.8872687652799"},{"Day":"25-05 12H","Production":"0.92783829","USD":"554.54203862259"},{"Day":"25-05 02H","Production":"0.94182047","USD":"565.81654194143"},{"Day":"25-05 03H","Production":"0.94743531","USD":"574.52571941931"},{"Day":"25-05 04H","Production":"0.95331299","USD":"579.83927978564"},{"Day":"25-05 05H","Production":"0.9563386","USD":"580.4497132700001"},{"Day":"25-05 06H","Production":"0.96906754","USD":"594.5520078162"},{"Day":"25-05 07H","Production":"0.97823946","USD":"580.6878346533"},{"Day":"25-05 08H","Production":"0.97823946","USD":"580.6878346533"},{"Day":"25-05 09H","Production":"0.99806768","USD":"595.88932187024"},{"Day":"25-05 10H","Production":"0.00706363","USD":"4.21520001161"},{"Day":"25-05 11H","Production":"0.01715723","USD":"10.19647316008"},{"Day":"25-05 12H","Production":"0.02629501","USD":"15.77321951856"},{"Day":"25-05 01H","Production":"0.04011605","USD":"24.3299831645"}
And here is the code used to draw the graph:
<script>
var chart = AmCharts.makeChart("chartdiv", {
"type": "serial",
"dataLoader": {
"url": "http://x.x.x.x/json.php", "format": "json"
},
"valueAxes": [{
"id": "v1",
"startDuration": 1,
"axisColor": "#FF6600",
"axisThickness": 5,
"gridAlpha": 0.1,
"axisAlpha": 1
}, {
"id": "v2",
"axisColor": "#FCD202",
"axisThickness": 5,
"gridAlpha": 0,
"axisAlpha": 1,
"position": "right",
"synchronizeWith": "v1",
"synchronizationMultiplier": 5
}],
"graphs": [{
"valueAxis": "v1",
"type": "column",
"fillColorsField": "#B0DE09",
"balloonText": "[[category]]: <b>[[value]]</b>",
"fillAlphas": 0.8,
"lineAlpha": 0.2,
"title": "Production Crypto",
"valueField": "Production"
}, {
"valueAxis": "v2",
"type": "smoothedLine",
"lineColor": "#364cf2",
"lineThickness": 3,
"bulletBorderThickness": 1,
"hideBulletsCount": 30,
"title": "Production USD",
"valueField": "USD",
"fillAlphas": 0
}],
"chartCursor": {
"categoryBalloonEnabled": false,
"cursorAlpha": 0,
"zoomable": false
},
"categoryField": "Day",
"categoryAxis": {
"gridPosition": "start",
"gridAlpha": 0,
"tickPosition": "start",
"tickLength": 20
}
});
</script>
This is the graph I get: the blue line for the USD series clearly does not render properly.
I have been able to get a dual Y-axis graph to display properly (http://jsfiddle.net/spjem6b8/) with randomly generated data, so I guess it is the JSON data that causes this issue.
What am I doing wrong here?
Thanks
Your synchronizationMultiplier isn't high enough to make the second Y axis large enough to encapsulate the USD values in your dataset. In your current setup, your second Y-axis values range from 0-6, but your USD values range from 4-600. Increase your synchronizationMultiplier to 600 in this case and it will work.
"valueAxes": [{
"id": "v1",
"startDuration": 1,
"axisColor": "#FF6600",
"axisThickness": 5,
"gridAlpha": 0.1,
"axisAlpha": 1
}, {
"id": "v2",
"axisColor": "#FCD202",
"axisThickness": 5,
"gridAlpha": 0,
"axisAlpha": 1,
"position": "right",
"synchronizeWith": "v1",
"synchronizationMultiplier": 600
}],
There's also an unsupported beta property called synchronizeGrid that can sometimes do this for you automatically, but it's not guaranteed to work with all value axis setting combinations, so your mileage may vary. This is set at the top of the chart config:
AmCharts.makeChart("chartdiv", {
// ...
synchronizeGrid: true, //no need for synchronizeWith/Multiplier in the value axis
// ...
});
Here's an updated fiddle with the updated multiplier: http://jsfiddle.net/spjem6b8/1/

How to optimize a nightwatch.json file?

I'm writing tests for BrowserStack using NightwatchJS.
My nightwatch.json looks like this:
{
"src_folders": [
"..."
],
"tests_output": "test/tests_output/",
"detailed_output": true,
"selenium": {
"start_process": false,
"host": "hub.browserstack.com",
"port": 80
},
"test_workers": {
"enabled": true,
"workers": 2
},
"test_settings": {
"ie7": {
"selenium_port": 80,
"selenium_host": "hub.browserstack.com",
"silent": true,
"desiredCapabilities": {
"os": "Windows",
"os_version": "XP",
"browserName": "IE",
"version": "7",
"resolution": "1024x768",
"javascriptEnabled": true,
"acceptSslCerts": true,
"browserstack.local": "true",
"browserstack.video": "true",
"browserstack.debug": "true",
"browserstack.localIdentifier": "<localIdentifier>",
"browserstack.user": "<userName>",
"browserstack.key": "<userkey>"
}
},
"ie8": {
"selenium_port": 80,
"selenium_host": "hub.browserstack.com",
"silent": true,
"desiredCapabilities": {
"os": "Windows",
"os_version": "7",
"browserName": "IE",
"version": "8",
"resolution": "1024x768",
"javascriptEnabled": true,
"acceptSslCerts": true,
"browserstack.local": "true",
"browserstack.video": "true",
"browserstack.debug": "true",
"browserstack.localIdentifier": "<localIdentifier>",
"browserstack.user": "<userName>",
"browserstack.key": "<userkey>"
}
},
"chrome": {
"selenium_port": 80,
"selenium_host": "hub.browserstack.com",
"silent": true,
"desiredCapabilities": {
"os": "Windows",
"os_version": "10",
"browserName": "chrome",
"resolution": "1024x768",
"javascriptEnabled": true,
"acceptSslCerts": true,
"browserstack.local": "true",
"browserstack.video": "true",
"browserstack.debug": "true",
"browserstack.localIdentifier": "<localIdentifier>",
"browserstack.user": "<userName>",
"browserstack.key": "<userkey>"
}
},
//Similar blocks for other platforms
}
}
It's the classic way to define a configuration file.
As you can see, there is a lot of redundant information in each platform block: localIdentifier, userName, userkey, etc.
My question: is there a way to optimize the configuration file, so that when I want to change my userkey or browserstack.debug, for example, I only change it in one place and avoid mistakes?
Thanks to the comment from @Mukesh-Tiwari I was able to optimize my nightwatch.json file. I'm sharing the idea:
for(var i in nightwatch_config.test_settings){
var config = nightwatch_config.test_settings[i];
config['selenium_host'] = nightwatch_config.selenium.host;
config['selenium_port'] = nightwatch_config.selenium.port;
config['desiredCapabilities'] = config['desiredCapabilities'] || {};
for(var j in nightwatch_config.common_capabilities){
config['desiredCapabilities'][j] = config['desiredCapabilities'][j] || nightwatch_config.common_capabilities[j];
}
}
In the example above, selenium_host and selenium_port are defined in one place, so if I need to change them I only touch one line of code.
Thanks again to Mukesh-Tiwari.
Important
To make it work, you need to rename your configuration file from nightwatch.json to nightwatch.js, because this approach uses JavaScript code.
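For reference, here is a minimal sketch of what the resulting nightwatch.js could look like. The capability values are simply the ones from the JSON above moved into a shared common_capabilities section; treat the exact layout as an assumption and keep whatever environment-specific settings you need per block.

// nightwatch.js (sketch) - the former nightwatch.json contents become a JS object,
// with one shared common_capabilities section instead of repeating values per platform.
var nightwatch_config = {
  src_folders: ["..."],
  tests_output: "test/tests_output/",
  detailed_output: true,
  selenium: {
    start_process: false,
    host: "hub.browserstack.com",
    port: 80
  },
  test_workers: { enabled: true, workers: 2 },
  common_capabilities: {
    "resolution": "1024x768",
    "javascriptEnabled": true,
    "acceptSslCerts": true,
    "browserstack.local": "true",
    "browserstack.video": "true",
    "browserstack.debug": "true",
    "browserstack.localIdentifier": "<localIdentifier>",
    "browserstack.user": "<userName>",
    "browserstack.key": "<userkey>"
  },
  test_settings: {
    ie7: { silent: true, desiredCapabilities: { os: "Windows", os_version: "XP", browserName: "IE", version: "7" } },
    ie8: { silent: true, desiredCapabilities: { os: "Windows", os_version: "7", browserName: "IE", version: "8" } },
    chrome: { silent: true, desiredCapabilities: { os: "Windows", os_version: "10", browserName: "chrome" } }
    // Similar blocks for other platforms
  }
};

// Copy the shared selenium host/port and the common capabilities into every environment.
for (var i in nightwatch_config.test_settings) {
  var config = nightwatch_config.test_settings[i];
  config["selenium_host"] = nightwatch_config.selenium.host;
  config["selenium_port"] = nightwatch_config.selenium.port;
  config["desiredCapabilities"] = config["desiredCapabilities"] || {};
  for (var j in nightwatch_config.common_capabilities) {
    config["desiredCapabilities"][j] = config["desiredCapabilities"][j] || nightwatch_config.common_capabilities[j];
  }
}

module.exports = nightwatch_config;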