I created a countdown from 5 to 0. It starts when you click the "START" button:
<html>
<head>
<meta charset="utf-8"/>
<script src="https://cdnjs.cloudflare.com/ajax/libs/rxjs/6.5.5/rxjs.umd.js">
</script>
</head>
<body>
<button id="start">START</button>
COUNTDOWN:<span id="countdown"></span>
<script>
let start = document.getElementById('start');
let start_click = rxjs.fromEvent(start, 'click');
start_click.subscribe(x => console.log('click'));
start_click.pipe(rxjs.operators.first()).subscribe(
()=> {
let time = rxjs.timer(0, 1000).pipe(
rxjs.operators.skip(0)
, rxjs.operators.take(6)
, rxjs.operators.map(x => 5-x)
);
time.subscribe(x => console.log('instant', x));
let countdown = document.getElementById('countdown');
time.subscribe(x => countdown.innerText = x);
start.disabled = true;
let end = time.pipe(
rxjs.operators.last()
, rxjs.operators.repeatWhen(() => start_click)
);
end.subscribe(x=>start.disabled = false);
start_click.subscribe(x => start.disabled = true);
});
</script>
</body>
</html>
I'm struggling to figure out how to reset the countdown when the "START" button is pressed again.
I tried adding:
start_click.subscribe(x => countdown.innerText = 5);
But the value stays static. Thanks.
The reason it doesn't work after clicking 'Start' a second time is that you are using the first() operator on your start_click observable.
This means the observable only emits on the first click, then completes.
Simply remove .pipe(rxjs.operators.first()) and your code will work each time you click the button.
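A quick sketch of that change, stripped down to just the countdown (the disable/enable handling from your original code is left out):
// every click now starts a countdown, because first() no longer completes the stream
start_click.subscribe(() => {
  const time = rxjs.timer(0, 1000).pipe(
    rxjs.operators.take(6),
    rxjs.operators.map(x => 5 - x)
  );
  const countdown = document.getElementById('countdown');
  time.subscribe(x => countdown.innerText = x);
});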
However, it's generally a good idea to avoid nested subscriptions when possible. This helps you avoid memory leaks (due to not unsubscribing properly) and makes the code easier to understand.
You can avoid nested subscriptions by using one of the "Higher Order Mapping Operators". This is just a fancy way of saying: operators that map the incoming value to another observable, subscribe to it, and emit those values. They also manage these "inner subscriptions" automatically.
The switchMap operator will "switch" to a new observable whenever a new value is received. So in your case, whenever a new click is received, a new 5-second timer observable is created.
Simplified code could look something like this:
// with the UMD bundle from your page, pull the operators off rxjs.operators
const { fromEvent, timer } = rxjs;
const { tap, switchMap, map, take, finalize } = rxjs.operators;

const start = document.getElementById('start');
const countdown = document.getElementById('countdown');
const start_click = fromEvent(start, 'click');

const time = start_click.pipe(
  tap(() => start.disabled = true),
  // every click switches to a fresh countdown; any running one is discarded
  switchMap(() => timer(0, 1000).pipe(
    map(x => 5 - x),
    take(6),
    finalize(() => start.disabled = false)
  ))
);

time.subscribe(x => countdown.innerText = x);
Notice how there is only a single subscription now. We defined two observables: start_click, which is your stream of clicks, and time, which emits the current value of the timer. time is defined from the start_click stream, so whenever a new click is received, a new timer gets created under the hood and its values are emitted.
The issue is caused by rxjs.operators.first(). Try the following instead:
start_click.pipe(
rxjs.operators.tap(() => start.disabled = true)
, rxjs.operators.switchMap(() => rxjs.timer(0, 1000).pipe(
rxjs.operators.skip(0) // <- do you need it?
, rxjs.operators.take(6)
, rxjs.operators.map(x => 5-x)
, rxjs.operators.finalize(() => start.disabled = false)
)),
).subscribe(x => {
console.log('instant', x);
let countdown = document.getElementById('countdown');
countdown.innerText = x;
});
Today I ran into a rather strange behaviour in Chrome. I was playing with PerformanceObserver and found out that when you add two stylesheets with the same URL to the DOM very quickly, Chrome fires only one request, which obviously makes sense as it saves network load.
const testCase = async () => {
let numberOfRecords = 0
const observer = new PerformanceObserver((entryList) => {
const performanceEntries = entryList.getEntries()
numberOfRecords += performanceEntries.length
})
observer.observe({ entryTypes: ['resource'] })
// Test: Only one performance record is created because links are added at the same time
// and chrome detects duplicate request
const linkElement1 = document.createElement('link')
linkElement1.rel = 'stylesheet'
linkElement1.href = 'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css'
document.head.appendChild(linkElement1)
const linkElement2 = document.createElement('link')
linkElement2.rel = 'stylesheet'
linkElement2.href = 'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css'
document.head.appendChild(linkElement2)
// wait a little bit so performance observer callback is called
await new Promise((resolve) => {
setTimeout(() => resolve(), 1000)
})
console.assert(numberOfRecords === 1, 'Test')
console.log('Test finished')
}
testCase()
When a sleep is added between appending the link nodes to the DOM, Chrome fires two requests (the second one is served from the cache):
const testCase = async () => {
let numberOfRecords = 0
const observer = new PerformanceObserver((entryList) => {
const performanceEntries = entryList.getEntries()
numberOfRecords += performanceEntries.length
})
observer.observe({ entryTypes: ['resource'] })
// Test: Two performance records are created because the links are added with a delay,
// so Chrome fires two requests (the second one is served from cache)
const linkElement1 = document.createElement('link')
linkElement1.rel = 'stylesheet'
linkElement1.href = 'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css'
document.head.appendChild(linkElement1)
// wait here so chrome triggers two requests
await new Promise((resolve) => {
setTimeout(() => resolve(), 1000)
})
const linkElement2 = document.createElement('link')
linkElement2.rel = 'stylesheet'
linkElement2.href = 'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css'
document.head.appendChild(linkElement2)
// wait a little bit so performance observer callback is called
await new Promise((resolve) => {
setTimeout(() => resolve(), 1000)
})
console.assert(numberOfRecords === 2, 'Test')
console.log('Test finished')
}
testCase()
However, when I run this second snippet via an automated test (webdriver.io) or try it on a cloud service like BrowserStack/LambdaTest (same browser version, same OS version), it fails because only one request is triggered. So I wonder: what's the difference?
To see it for yourself, open an empty page (it's quite important that the page is empty and doesn't make any background requests) and paste the code examples into the console.
I just wonder whether you disabled the cache for the automated tests. In Chrome, apparently, if you didn't tick "Disable cache", then your second test should fail.
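If your automation can drive Chrome over the DevTools protocol, you can make the cache state explicit instead of relying on DevTools being open with "Disable cache" ticked. A minimal sketch with Puppeteer (not webdriver.io, just to illustrate the idea):
const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  // run the test with the HTTP cache disabled (flip to true to compare against cached behaviour)
  await page.setCacheEnabled(false);
  await page.goto('about:blank');
  // ...inject the snippet from the question with page.evaluate() and read back numberOfRecords...
  await browser.close();
})();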
I was able to successfully integrate the Three.js EffectComposer in A-Frame as a component by exporting everything as THREE.EffectComposer, THREE.SSAOPass, etc., adding the effect inside an A-Frame component, and tweaking the A-Frame renderer to update the effects in the scene. OutlinePass from Three.js worked fine in this code, but SSAO is not working and I don't get any errors. Can someone please help me figure out the problem? The code for the SSAOPass looks like this:
AFRAME.registerComponent('ssao', {
init: function () {
this.el.addEventListener('that', evt => this.onEnter());
this.el.addEventListener('mouseleave', evt => this.onLeave());
setTimeout(() => this.el.emit("that"), 2000);
},
onEnter: function () {
const scene = this.el.sceneEl.object3D;
const camera = this.el.sceneEl.camera;
const renderer = this.el.sceneEl.renderer;
const render = renderer.render;
const composer = new THREE.EffectComposer(renderer);
//let renderPass = new THREE.RenderPass(scene, camera);
//let outlinePass = new THREE.OutlinePass(new THREE.Vector2(window.innerWidth, window.innerHeight), scene, camera);
const ssaoPass = new THREE.SSAOPass( scene, camera, window.innerWidth, window.innerHeight );
//composer.addPass(renderPass);
//composer.addPass(outlinePass);
ssaoPass.kernelRadius = 16;
composer.addPass( ssaoPass );
// let objects = [];
// this.el.object3D.traverse(node => {
// if (!node.isMesh) return;
// objects.push(node);
// });
// outlinePass.selectedObjects = objects;
// outlinePass.renderToScreen = true;
// outlinePass.edgeStrength = this.data.strength;
// outlinePass.edgeGlow = this.data.glow;
// outlinePass.visibleEdgeColor.set(this.data.color);
// HACK the AFRAME render method (a bit ugly)
const clock = new THREE.Clock();
this.originalRenderMethod = render;
let calledByComposer = false;
renderer.render = function () {
if (calledByComposer) {
render.apply(renderer, arguments);
} else {
calledByComposer = true;
composer.render(clock.getDelta());
calledByComposer = false;
}
};
},
onLeave: function () {
this.el.sceneEl.renderer.render = this.originalRenderMethod;
},
remove: function () {
this.onLeave();
}
});
I have also created a Glitch project which I am sharing here. Please feel free to join and collaborate on my project.
Edit link: https://glitch.com/edit/#!/accessible-torpid-partridge
Site link: https://accessible-torpid-partridge.glitch.me
Thanks in advance
The code is correct; all you need is to tweak the exposed SSAOShader uniforms: SSAOPass.kernelRadius, SSAOPass.minDistance, SSAOPass.maxDistance - like in the Three.js example.
Keep in mind the scale in that example is huge, so the values will need to be different in a default A-Frame scene.
It's a good idea to make the component dynamically updatable (via setAttribute(), provided you properly handle updates), so you can see what's going on in real time - something like I did here: SSAO in A-Frame (also based on Don McCurdy's gist).
I've used some basic HTML elements; most Three.js examples use dat.GUI, which is made for demo/debug tweaks.
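As an illustration of the "handle updates" part, the three values can be exposed through a component schema so they can be tweaked live with setAttribute(). A rough sketch (it assumes you keep a reference to the pass on the component, e.g. this.ssaoPass, when you create it in the code above; the component name 'ssao-tune' is just an example):
AFRAME.registerComponent('ssao-tune', {
  schema: {
    kernelRadius: { default: 16 },
    minDistance: { default: 0.005 },
    maxDistance: { default: 0.1 }
  },
  // runs on init and again every time setAttribute() changes the data
  update: function () {
    const pass = this.ssaoPass; // store the SSAOPass here when you create it
    if (!pass) { return; }
    pass.kernelRadius = this.data.kernelRadius;
    pass.minDistance = this.data.minDistance;
    pass.maxDistance = this.data.maxDistance;
  }
});
// e.g. from the browser console:
// document.querySelector('a-scene').setAttribute('ssao-tune', 'kernelRadius', 8);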
I am using the MediaRecorder API to record videos in a web application. The application has an option to switch between the camera and the screen. I am using a canvas to augment the stream recording: the stream is captured from the camera and redirected to a video element, the video is then rendered onto the canvas, and the canvas stream is passed to MediaRecorder.
What I noticed is that switching from screen to video (and vice versa) works fine as long as the user doesn't switch away from or minimize the Chrome window. The canvas rendering uses requestAnimationFrame, and it freezes after the tab loses focus.
Is there any way to instruct Chrome not to pause the execution of requestAnimationFrame? Is there an alternative way to switch streams without impacting the MediaRecorder recording?
Update:
After reading through the documentation: tabs which play audio or have an active WebSocket connection are not throttled. We are not doing either at the moment. This might be a workaround, but I'm hoping for an alternative solution from the community. (setTimeout and setInterval are throttled too aggressively, so I'm not using them; it also impacts rendering quality.)
Update 2:
I was able to fix this problem using a Worker. Instead of calling requestAnimationFrame on the main UI thread, the worker invokes the API and notifies the main thread via postMessage. When the UI thread finishes rendering, a message is sent back to the worker. There is also a delta-period calculation to throttle overwhelming messages from the worker.
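For anyone who wants to reproduce this, a rough sketch of the worker-driven tick loop described above (it uses setInterval inside the worker as the clock rather than requestAnimationFrame, since worker timers are not throttled the way a backgrounded tab's rAF is; the 'tick'/'done' message names and drawVideoFrameToCanvas are illustrative):
// ticker.worker.js - runs off the main thread, so it keeps ticking while the tab is hidden
let busy = false;
setInterval(() => {
  if (busy) return;        // back-pressure: wait until the last frame was drawn
  busy = true;
  postMessage('tick');
}, 1000 / 30);             // target ~30 fps
onmessage = (e) => { if (e.data === 'done') busy = false; };

// main.js
const ticker = new Worker('ticker.worker.js');
ticker.onmessage = () => {
  drawVideoFrameToCanvas(); // the existing canvas rendering step
  ticker.postMessage('done');
};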
There is an ongoing proposal to add a .replaceTrack() method to the MediaRecorder API, but for the time being, the specs still read
If at any point, a track is added to or removed from stream’s track set, the UA MUST immediately stop gathering data, discard any data that it has gathered [...]
And that's what is implemented.
So we still have to rely on hacks and do this ourselves...
The best one is probably to create a local RTC connection, and to record the receiving end.
// creates a mixable stream
async function mixableStream( initial_track ) {
const source_stream = new MediaStream( [] );
const pc1 = new RTCPeerConnection();
const pc2 = new RTCPeerConnection();
pc1.onicecandidate = (evt) => pc2.addIceCandidate( evt.candidate );
pc2.onicecandidate = (evt) => pc1.addIceCandidate( evt.candidate );
const wait_for_stream = waitForEvent( pc2, 'track')
.then( evt => new MediaStream( [ evt.track ] ) );
pc1.addTrack( initial_track, source_stream );
await waitForEvent( pc1, 'negotiationneeded' );
try {
await pc1.setLocalDescription( await pc1.createOffer() );
await pc2.setRemoteDescription( pc1.localDescription );
await pc2.setLocalDescription( await pc2.createAnswer() );
await pc1.setRemoteDescription( pc2.localDescription );
} catch ( err ) {
console.error( err );
}
return {
stream: await wait_for_stream,
async replaceTrack( new_track ) {
const sender = pc1.getSenders().find( ( { track } ) => track.kind == new_track.kind );
return sender && sender.replaceTrack( new_track ) ||
Promise.reject( "no such track" );
}
}
}
{ // remap unstable FF version
const proto = HTMLMediaElement.prototype;
if( !proto.captureStream ) { proto.captureStream = proto.mozCaptureStream; }
}
waitForEvent( document.getElementById( 'starter' ), 'click' )
.then( (evt) => evt.target.parentNode.remove() )
.then( (async() => {
const urls = [
"2/22/Volcano_Lava_Sample.webm",
"/a/a4/BBH_gravitational_lensing_of_gw150914.webm"
].map( (suffix) => "https://upload.wikimedia.org/wikipedia/commons/" + suffix );
const switcher_btn = document.getElementById( 'switcher' );
const stop_btn = document.getElementById( 'stopper' );
const video_out = document.getElementById( 'out' );
let current = 0;
// see below for 'recordVid'
const video_tracks = await Promise.all( urls.map( (url, index) => getVideoTracks( url ) ) );
const mixable_stream = await mixableStream( video_tracks[ current ].track );
switcher_btn.onclick = async (evt) => {
current = +!current;
await mixable_stream.replaceTrack( video_tracks[ current ].track );
};
// final recording part below
// only for demo, so we can see what happens now
video_out.srcObject = mixable_stream.stream;
const rec = new MediaRecorder( mixable_stream.stream );
const chunks = [];
rec.ondataavailable = (evt) => chunks.push( evt.data );
rec.onerror = console.log;
rec.onstop = (evt) => {
const final_file = new Blob( chunks );
video_tracks.forEach( (track) => track.stop() );
// only for demo, since we did set its srcObject
video_out.srcObject = null;
video_out.src = URL.createObjectURL( final_file );
switcher_btn.remove();
stop_btn.remove();
const anchor = document.createElement( 'a' );
anchor.download = 'file.webm';
anchor.textContent = 'download';
anchor.href = video_out.src;
document.body.prepend( anchor );
};
stop_btn.onclick = (evt) => rec.stop();
rec.start();
}))
.catch( console.error )
// some helpers below
// returns a video loaded to given url
function makeVid( url ) {
const vid = document.createElement('video');
vid.crossOrigin = true;
vid.loop = true;
vid.muted = true;
vid.src = url;
return vid.play()
.then( (_) => vid );
}
/* Grabs the video track from a <video> playing the given url
** #method stop() ::pauses the linked <video>
** #property track ::the video track
*/
async function getVideoTracks( url ) {
const player = await makeVid( url );
const track = player.captureStream().getVideoTracks()[ 0 ];
return {
track,
stop() { player.pause(); }
};
}
// Promisifies EventTarget.addEventListener
function waitForEvent( target, type ) {
return new Promise( (res) => target.addEventListener( type, res, { once: true } ) );
}
video { max-height: 100vh; max-width: 100vw; vertical-align: top; }
.overlay {
background: #ded;
position: fixed;
z-index: 999;
height: 100vh;
width: 100vw;
top: 0;
left: 0;
display: flex;
align-items: center;
justify-content: center;
}
<div class="overlay">
<button id="starter">start demo</button>
</div>
<button id="switcher">switch source</button>
<button id="stopper">stop recording</button>
<video id="out" muted controls autoplay></video>
Otherwise you can still go the canvas way, with the Web Audio timer I made for when the page is blurred, even though this will not work in Firefox, since they internally hook into rAF to push new frames into the recorder...
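For reference, a sketch of the kind of Web-Audio-based tick this refers to (an approximation, not the exact timer mentioned above): the audio clock keeps running while the tab is hidden, so chained onended callbacks can drive the canvas drawing instead of rAF.
// assumes the AudioContext can start, i.e. we are already past a user gesture
function audioTicker(callback, intervalMs) {
  const ctx = new AudioContext();
  const silence = ctx.createBuffer(1, 1, ctx.sampleRate); // one silent sample
  (function schedule() {
    const src = ctx.createBufferSource();
    src.buffer = silence;
    src.onended = () => { callback(); schedule(); };
    src.connect(ctx.destination);
    src.start(ctx.currentTime + intervalMs / 1000); // fires roughly intervalMs from now
  })();
  return () => ctx.close(); // call the returned function to stop ticking
}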
I had the same problem and tried to figure it out without too much complexity such as Canvas or SourceBuffer.
I used an RTCPeerConnection on the same page to make a connection. Once the connection is made, you get an RTCRtpSender back from peerConnection.addTrack, and from there you can easily switch.
I just made a library and a demo that you can find:
https://github.com/meething/StreamSwitcher/
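The core of that approach is the sender handle that addTrack returns - a minimal sketch (cameraTrack, screenTrack and localStream are placeholder names; see the repo above for the full version):
// inside some async handler:
// pc1 is the sending side of the local loopback connection; the receiving side is recorded
const sender = pc1.addTrack(cameraTrack, localStream); // addTrack returns an RTCRtpSender
// later: swap the source without restarting the MediaRecorder on the other end
await sender.replaceTrack(screenTrack);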
I'd like to prevent users from right-clicking on a Chromium instance generated in Puppeteer. How can I do this?
The most efficient way to do it is to register a function that will run before opening any new page:
await page.evaluateOnNewDocument(() =>
document.addEventListener('contextmenu', event => event.preventDefault())
);
Of course the target page could remove and/or reassign that event listener. So another way is to wait for the page to load (or wait for some selector) and create event listener again:
await page.evaluate(() =>
document.addEventListener('contextmenu', event => event.preventDefault())
);
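For instance, if the target page re-registers its own handlers after load, you could wait for navigation (or for a selector that signals the page is ready) before re-applying it - a sketch where url and '#app' are placeholders:
await page.goto(url, { waitUntil: 'domcontentloaded' });
await page.waitForSelector('#app'); // placeholder for "the page has rendered"
await page.evaluate(() =>
  document.addEventListener('contextmenu', event => event.preventDefault())
);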
Or you could do an even more devious thing and hijack document.addEventListener to disallow adding contextmenu event listeners at all:
await page.evaluateOnNewDocument(() => {
  document.addEventListener('contextmenu', event => event.preventDefault());
  const realAddEventListener = document.addEventListener;
  document.addEventListener = function (type, listener) {
    type === 'contextmenu' || realAddEventListener.apply(this, arguments);
  };
});
(based on this answer and its comment)
I am looking for a good document that explains pagination in React Native well; I can't find the one I'm looking for. I'm fetching data from a server (a set of 15 questions and answers). I want to display a single question per page, with next and previous buttons at the bottom. How do I do this? Right now I'm displaying all 15 questions on a single page with a ScrollView, but I want pagination. Please help me.
The library react-native-swiper would be the best to use in such a scenario; an example is included in the library's documentation.
This library uses ScrollView with a snap animation effect for each item, and it also provides customizable next and previous buttons.
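A rough sketch of how that could look for the 15 questions (it assumes the fetched data is already available as a questions array with question/answer fields; loop and showsButtons are documented react-native-swiper props):
import React from 'react';
import { Text, View } from 'react-native';
import Swiper from 'react-native-swiper';

// one question per page; the built-in buttons act as "previous" / "next"
const QuestionPager = ({ questions }) => (
  <Swiper loop={false} showsButtons={true}>
    {questions.map((item, index) => (
      <View key={index} style={{ flex: 1, padding: 16 }}>
        <Text>{item.question}</Text>
        <Text>{item.answer}</Text>
      </View>
    ))}
  </Swiper>
);

export default QuestionPager;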
var start = 0; // page window start (declared above the component class)
var end = 100; // page window end
fetchData = () => {
var mydata = realm.objects('Product_Info');
this.setState({dbData: mydata})
console.log("fetch---------- paggingData.start--> " + start);
console.log("fetch---------- paggingData.end--> " + end);
var newData = mydata.filtered('prodId > $0 AND prodId <= $1' , start, end); // TODO Logic fetch Data
let paggingData =[];
paggingData = JSON.parse(JSON.stringify(this.state.paggingData));
Object.keys(newData).map(key => {
paggingData.push(newData[key])
})
this.setState({
paggingData
}, () => {
console.log('Search-------------------------------PAGGGING DATA \n', this.state.paggingData)
})
this.setState({dataProvider: dataProvider.cloneWithRows(paggingData)}) //TODO ... working in RecyclerView both
}
onScroll = () => {
console.log("Scrolling");
}
onEndReached = () => {
console.log("\n\n\n\n\--------------------------------------------Reached to End---------------------------------------------");
start = end;
end = end+100;
this.fetchData()
}
<RecyclerListView
layoutProvider={this.layoutProvider}
dataProvider={this.state.dataProvider}
rowRenderer={this.rowRenderer}
onEndReached={this.onEndReached}
onScroll={this.onScroll}
/>