Suppose there is a live WAV stream reachable at a certain URL, and we need to play it with as little latency as possible. Using the HTML5 <audio> element for this task is a no-go, because browsers attempt to pre-buffer several seconds of the stream, and the latency goes up accordingly. That's the reason for using Flash instead. However, due to my inexperience with this technology, I only managed to get occasional clicks and white noise. What's wrong with the code below? Thanks.
var soundBuffer:ByteArray = new ByteArray();
var soundStream:URLStream = new URLStream();
soundStream.addEventListener(ProgressEvent.PROGRESS, readSound);
soundStream.load(new URLRequest(WAV_FILE_URL));

var sound:Sound = new Sound();
sound.addEventListener(SampleDataEvent.SAMPLE_DATA, playSound);
sound.play();

function readSound(event:ProgressEvent):void {
    soundStream.readBytes(soundBuffer, 0, soundStream.bytesAvailable);
}

function playSound(event:SampleDataEvent):void {
    /* The docs say that if we send too few samples,
       Sound will consider it an EOF */
    var samples:int = (soundBuffer.length - soundBuffer.position) / 4;
    var toadd:int = 4096 - samples;
    try {
        for (var c:int = 0; c < samples; c++) {
            var n:Number = soundBuffer.readFloat();
            event.data.writeFloat(n);
            event.data.writeFloat(n);
        }
    } catch (e:Error) {
        ExternalInterface.call("errorReport", e.message);
    }
    for (var d:int = 0; d < toadd; d++) {
        event.data.writeFloat(0);
        event.data.writeFloat(0);
    }
}
As The_asMan pointed out, playing a WAV file is not that easy. See as3wavsound for an example.
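To illustrate part of the difficulty, here is a minimal sketch of what the playSound loop above glosses over, assuming (and these are assumptions; a real player must actually parse the header) a fixed 44-byte RIFF header followed by 16-bit mono PCM at 44.1 kHz:

soundBuffer.endian = Endian.LITTLE_ENDIAN; // WAV data is little-endian; ByteArray defaults to big-endian

function playSoundPcm(event:SampleDataEvent):void { // hypothetical replacement for playSound
    if (soundBuffer.position == 0) soundBuffer.position = 44; // skip the assumed fixed-size header once
    var written:int = 0;
    while (written < 4096 && soundBuffer.bytesAvailable >= 2) {
        var s:Number = soundBuffer.readShort() / 32768.0; // convert 16-bit PCM to a float in [-1, 1)
        event.data.writeFloat(s); // left channel
        event.data.writeFloat(s); // right channel
        written++;
    }
    for (; written < 4096; written++) { // pad with silence so Sound doesn't treat the shortfall as EOF
        event.data.writeFloat(0);
        event.data.writeFloat(0);
    }
}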
If your goal is low latency, the best option would be to convert the stream to MP3, so you can just use a SoundLoaderContext with a small buffer.
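As a minimal sketch of that route, assuming the server re-encodes the stream as MP3 at a hypothetical URL (SoundLoaderContext's bufferTime is in milliseconds, so a small value keeps the latency down):

var context:SoundLoaderContext = new SoundLoaderContext(500); // buffer only 500 ms instead of the default 1000
var mp3Stream:Sound = new Sound(new URLRequest("http://example.com/live.mp3"), context); // hypothetical URL
mp3Stream.play();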
How do I load an .flv (let's call it "library.flv") sitting in my (internal) Flash library into my NetStream?
I can easily load external .flv files with the code below, but I need to load the .flv from my library.
//Creating the video object
var ADFvideo:Video = new Video(110, 180);
videoContainer.addChild(ADFvideo);
ADFvideo.x = 0;
ADFvideo.y = 0;

//Loading the flv into the video object
var ADFnc:NetConnection = new NetConnection();
ADFnc.connect(null);
var ADFns:NetStream = new NetStream(ADFnc);
ADFvideo.attachNetStream(ADFns);

//Handling metaData (it arrives as the video starts playing, so it is the perfect time to fire the first event)
var ADFcustomClient:Object = new Object();
ADFcustomClient.onMetaData = ADFmetaDataHandler;
ADFns.client = ADFcustomClient;

var ADFinfoObj:Object;
function ADFmetaDataHandler(ADFinfo:Object):void
{
    //meta stuff
}

ADFns.play("files/external.flv"); /* current way of loading the external .flv */

ADFns.addEventListener(NetStatusEvent.NET_STATUS, statusChanged);
function statusChanged(stats:NetStatusEvent):void
{
    //event changes
}
I don't think you can use NetStream with an embedded FLV video. When you import the video to your library, you have the option to convert it to a movieclip or to keep it as a simple video that sits on the timeline. For the latter option, you can't create an AS linkage. It is also not recommended for longer clips, as you will run into synchronisation, playback, and pre-loading issues.
You could try this adjustment of your posted code as a starting point. Tested using a 3-second video (H.263 / Sorenson) at 1280x720 with 44100 Hz 128 kbps MP3 sound, and there was no "..experiencing sluggish behaviour" on my side, even on a crappy testing machine.
When the video ends, you need to use your statusChanged(stats:NetStatusEvent) function to handle what happens next (I've edited the code to do constant replays as an example).
Note that this code means the final compiled SWF and FLV exist as one package, so a 200 kB FLV will add 200 kB to the output SWF size. Bear this in mind, especially with large or multiple FLV embeds.
//// Specify embed item and create Class to store such item
[Embed(source = "yourFileName.flv", mimeType = "application/octet-stream")]
var bytes_FLV:Class;
//// End embedded item setup

//create bytes for NStream from embedded item
var file_BA:ByteArray = new bytes_FLV();

//Creating the video object
var ADFvideo:Video = new Video(110, 180);
videoContainer.addChild(ADFvideo);
ADFvideo.x = 0;
ADFvideo.y = 0;

//Loading the flv into the video object
var ADFnc:NetConnection = new NetConnection();
ADFnc.connect(null);
var ADFns:NetStream = new NetStream(ADFnc);
ADFvideo.attachNetStream(ADFns);

//Handling metaData (it arrives as the video starts playing, so it is the perfect time to fire the first event)
var ADFcustomClient:Object = new Object();
ADFcustomClient.onMetaData = ADFmetaDataHandler;
ADFns.client = ADFcustomClient;

var ADFinfoObj:Object;
function ADFmetaDataHandler(ADFinfo:Object):void
{
    //meta stuff
}

///ADFns.play("files/external.flv"); /* previous way of loading the external .flv */

//Send file_BA to NStream for playback..
ADFns.play(null); //null puts the NetStream into "data generation" (appendBytes) mode
ADFns.appendBytesAction(NetStreamAppendBytesAction.RESET_BEGIN);
ADFns.appendBytes(file_BA);

ADFns.addEventListener(NetStatusEvent.NET_STATUS, statusChanged);
function statusChanged(stats:NetStatusEvent):void
{
    //event changes
    //trace("NetStream Status : " + stats.info.code);
    if (stats.info.code == "NetStream.Buffer.Empty")
    {
        //Buffer.Empty = video has ended, so reset and re-append for a constant replay
        ADFns.appendBytesAction(NetStreamAppendBytesAction.RESET_BEGIN);
        ADFns.appendBytes(file_BA);
    }
}
I'm trying to play sound from a ByteArray captured from the microphone. I'm expecting to hear the sound from the microphone, but what I get is only random, distorted sound. This is the code that I'm using now:
var playBa:ByteArray;
var player:Sound = new Sound();
player.addEventListener(SampleDataEvent.SAMPLE_DATA, playMic);
player.play();

var mic:Microphone = Microphone.getMicrophone();
mic.gain = 100;
mic.rate = 44;
mic.setSilenceLevel(0, 4000);
mic.addEventListener(SampleDataEvent.SAMPLE_DATA, onMicSample);

function playMic(e:SampleDataEvent):void
{
    if (playBa != null) e.data.writeBytes(playBa, 0, playBa.length);
}

function onMicSample(e:SampleDataEvent):void
{
    playBa = e.data;
    playBa.position = 0;
    player.play();
}
And after a few seconds of distorted sound, this is what I got in the Output window:
RangeError: Error #2004: One of the parameters is invalid.
at flash.media::Sound/play()
at vclass_fla::MainTimeline/onMicSample()
Please help me. I'm totally new to AS3 and to this site. Any help is highly appreciated. Thanks.
Some microphones are very sensitive, so you need to apply moderate settings like those shown below. Also, if the speakers are close to the microphone, the sound gets looped back.
var myMic:Microphone = Microphone.getMicrophone(); // detect microphone
myMic.gain = 50;
myMic.setUseEchoSuppression(true);
myMic.setLoopBack(true);
myMic.setSilenceLevel(50, 1000);
To learn more about sound capturing, see:
Capturing microphone sound data
The problem is that the SAMPLE_DATA event requires blocks of 2048 to 8192 samples. If the microphone has recorded fewer samples than that by the time playMic is called, the runtime throws "RangeError: Error #2004...".
function playMic(event:SampleDataEvent):void
{
    trace("sample event");
    for (var ii:uint = 0; ii < 8192 && playBa.bytesAvailable > 0; ii++)
    {
        var n1:Number = playBa.readFloat();
        //trace(n1);
        event.data.writeFloat(n1);
        event.data.writeFloat(n1);
        if (playBa.bytesAvailable == 0)
        {
            trace("data_finished");
            break;
        }
    }
}
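As a companion sketch for the recording side (my own assumption, reusing the question's player and playBa): append each microphone block to one persistent buffer and start the dynamic Sound exactly once, since the repeated player.play() calls in onMicSample are a likely source of the Error #2004 shown above.

var started:Boolean = false;
function onMicSampleFixed(e:SampleDataEvent):void // hypothetical replacement for onMicSample
{
    if (playBa == null) playBa = new ByteArray();
    e.data.readBytes(playBa, playBa.length); // append the new block at the end of the buffer
    if (!started)
    {
        started = true;
        player.play(); // start playback once; playMic keeps pulling from playBa
    }
}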
Now I have code like this:
soundData = new ByteArray();
microphone = Microphone.getMicrophone();
microphone.codec = SoundCodec.SPEEX;
microphone.rate = 8;
microphone.gain = 100;
microphone.addEventListener(SampleDataEvent.SAMPLE_DATA, micSampleDataHandler);

function micSampleDataHandler(event:SampleDataEvent):void {
    while (event.data.bytesAvailable) {
        var sample:Number = event.data.readFloat();
        soundData.writeFloat(sample);
    }
}
The raw data is recorded from the microphone. How do I go about getting it into a ByteArray after applying the Speex codec compression? Note that the converted data must still be playable.
Refer to this code:
soundData.position = 0;
var soundOutput:Sound = new Sound();
soundOutput.addEventListener(SampleDataEvent.SAMPLE_DATA, playSound);
soundOutput.play();

function playSound(event:SampleDataEvent):void {
    if (soundData.bytesAvailable == 0)
    {
        return;
    }
    for (var i:int = 0; i < 8192; i++)
    {
        var sample:Number = 0;
        if (soundData.bytesAvailable > 0)
        {
            sample = soundData.readFloat();
        }
        event.data.writeFloat(sample);
        event.data.writeFloat(sample);
    }
}
When using SoundCodec.SPEEX, the code above does not play back at 1x speed, so you should correct the playSound function. Maybe you have tested this already: if you remove microphone.codec = SoundCodec.SPEEX; the rate is correct.
More information: Adobe's official Capturing sound input documentation.
There are some known problems when recording in Speex; refer to the following articles:
http://forums.adobe.com/message/3571251#3571251
http://forums.adobe.com/message/3584747
If the SoundFormat indicates Speex, the audio is compressed mono sampled at 16 kHz. In Flash, a Sound object plays at 44.1 kHz. Since you're sampling at 16 kHz (Speex), you're consuming data through the SampleDataEvent handler about 2.75 times faster than you are producing it.
So you must change the for (or while) loop in playSound accordingly.
I recommend the following site; it is a great tutorial on how to adjust the playback rate:
http://www.kelvinluck.com/2008/11/first-steps-with-flash-10-audio-programming/
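For illustration, here is a rough sketch of one way to do this (an assumption-laden variant of mine, not the tutorial's exact code): step through the 16 kHz source at 16000/44100 of the output rate, so each source sample is held for roughly 2.75 output samples. The tutorial covers smoother approaches, such as interpolating between neighbouring samples.

var phase:Number = 0; // fractional read position, measured in source samples
function playSoundResampled(event:SampleDataEvent):void { // hypothetical replacement for playSound
    for (var i:int = 0; i < 8192; i++)
    {
        var byteIndex:int = int(phase) * 4; // 4 bytes per stored float sample
        if (byteIndex + 4 > soundData.length) break; // ran out of recorded data
        soundData.position = byteIndex;
        var sample:Number = soundData.readFloat();
        event.data.writeFloat(sample); // left
        event.data.writeFloat(sample); // right
        phase += 16000 / 44100; // advance the source at its own (slower) rate
    }
}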
I am trying to play an FLV video file in Flex 4.5 through a NetStream fed from a ByteArray. What I am doing is below:
Creating a NetStream and a Video object
Attaching the NetStream to the Video
Reading the FLV file into a ByteArray
Appending the ByteArray to the NetStream using the "appendBytes" method
Playing the video
In this scenario, play, pause, and stop all work fine with the video.
But when I try to seek in the video, it does not work.
You can see the code I am using at the link http://pastebin.com/fZp0mKDs
Can anybody tell me where I am going wrong in implementing seeking?
Any code sample or any kind of help would be appreciated.
I got it; the code below worked in my case.
// onMetaData collects every keyframe timestamp and its corresponding file position.
// Assumed member variables: tags (Array of {timestamp, tagPosition} objects), seekPos (int),
// stream (NetStream), and totalfilePositionArray (the ByteArray holding the entire FLV file).
function onMetaData(informationObject:Object):void
{
    for (var propertyName:String in informationObject)
    {
        if (propertyName == "keyframes")
        {
            var kfObject:Object = informationObject[propertyName];
            var timeArray:Array = kfObject["times"];
            var filePositionArray:Array = kfObject["filepositions"];
            for (var i:int = 0; i < timeArray.length; i++)
            {
                var tagPosition:int = filePositionArray[i]; // read the file position
                var timestamp:Number = timeArray[i]; // read the timestamp
                tags.push({timestamp:timestamp, tagPosition:tagPosition});
            }
        }
    }
}

// on a seek click, find the keyframe just before the requested time and remember its file position
protected function seek_click(seektime:Number):void
{
    var currentTime:Number = 0;
    var previousTime:Number = 0;
    for (var i:int = 1; i < tags.length; i++)
    {
        currentTime = tags[i].timestamp;
        previousTime = tags[i - 1].timestamp;
        if (previousTime < seektime && seektime < currentTime)
        {
            seekPos = tags[i - 1].tagPosition;
            stream.seek(previousTime);
            break;
        }
    }
}

// once the seek is acknowledged, append bytes starting from the stored file position
private function netStatusHandler(event:NetStatusEvent):void
{
    switch (event.info.code)
    {
        case "NetStream.Seek.Notify" :
            stream.appendBytesAction(NetStreamAppendBytesAction.RESET_SEEK);
            totalfilePositionArray.position = seekPos;
            var bytes:ByteArray = new ByteArray();
            totalfilePositionArray.readBytes(bytes);
            stream.appendBytes(bytes);
            stream.resume();
            break;
    }
}
To inject keyframe metadata into the FLV file, use an injector tool, e.g. FLV MetaData Injector:
http://www.buraks.com/flvmdi/
I think there is a problem with seeking in the ByteArray constructed after reading the file. Just play your NetStream directly; it works:
var fileName:String = "dummy-video.flv";
ns.play(fileName);
I have been experimenting with Firefox's Audio API to detect silence in audio. (The point is to enable semi-automated transcription.)
Surprisingly, this simple code more or less suffices to detect silence and pause:
var audio = document.getElementsByTagName("audio")[0];
audio.addEventListener("MozAudioAvailable", pauseOnSilence, false);

function pauseOnSilence(event) {
    var val = event.frameBuffer[0];
    if (Math.abs(val) < .0001) {
        audio.pause();
    }
}
It's imperfect but as a proof of concept, I'm convinced.
My question now is: is there a way to do the same thing with Webkit's Audio API? From what I've seen of it, it's more oriented toward synthesis than sound processing (but perhaps I'm wrong?).
(I wish the Webkit team would just implement the same interface that Mozilla has created, and then move on to their fancier stuff...)
You should be able to do something like this using an AnalyserNode, or perhaps by looking for thresholding using a JavaScriptAudioNode.
For example:
meter.onaudioprocess = function(e) {
    var buffer = e.inputBuffer.getChannelData(0); // Left buffer only.
    // TODO: Do the same for right.
    var isClipping = false;
    // Iterate through buffer to check if any of the |values| exceeds 1.
    for (var i = 0; i < buffer.length; i++) {
        var absValue = Math.abs(buffer[i]);
        if (absValue >= 1) {
            isClipping = true;
            break;
        }
    }
    this.isClipping = isClipping;
    if (isClipping) {
        this.lastClipTime = new Date();
    }
};
Rather than clipping, you can simply check for low enough levels.
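For instance, here is a minimal sketch of that variant, assuming node is a JavaScriptAudioNode wired into the graph and audio is the <audio> element to pause (the 0.0001 threshold mirrors the Firefox example above):

node.onaudioprocess = function(e) {
    var buffer = e.inputBuffer.getChannelData(0);
    var silent = true;
    // The block counts as silent only if every sample stays below the threshold.
    for (var i = 0; i < buffer.length; i++) {
        if (Math.abs(buffer[i]) >= 0.0001) {
            silent = false;
            break;
        }
    }
    if (silent) {
        audio.pause();
    }
};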
Roughly adapted from this tutorial. The specific sample is here.
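And here is a hedged sketch of the AnalyserNode route mentioned above, assuming context is a webkitAudioContext, source is a node carrying the <audio> element's output (e.g. from createMediaElementSource), and audio is the element itself. getByteTimeDomainData returns samples centered at 128, so silence shows up as a buffer that stays near 128:

var analyser = context.createAnalyser();
source.connect(analyser);
analyser.connect(context.destination);
var data = new Uint8Array(analyser.fftSize);

function checkForSilence() {
    analyser.getByteTimeDomainData(data); // time-domain samples as bytes, 128 = zero level
    var silent = true;
    for (var i = 0; i < data.length; i++) {
        if (Math.abs(data[i] - 128) > 1) { // allow a one-step deviation around the midpoint
            silent = false;
            break;
        }
    }
    if (silent) {
        audio.pause();
    }
}
setInterval(checkForSilence, 100); // poll, since the analyser fires no events of its own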