The following examples illustrate WebRTC integration with the Web Audio API. They are borrowed, with modifications, from Robert O'Callahan's MediaStream Processing API proposal.
Note the addition of two new AudioContext methods: createMediaStreamSource() and createMediaStreamDestination(). They still need to be documented more fully, but they provide a simple and straightforward integration with the Web Audio API. Throughout the examples, context refers to an AudioContext and peerConnection to a previously established peer connection; these, along with helpers such as customAudioProcessor, drawAnimation, and the kickDrum909 and immenseCathedral buffers, are assumed to be set up elsewhere.
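As a quick sketch of how the two methods fit together (this is not one of the borrowed examples; the inputStream variable and the gain value are arbitrary placeholders), an incoming MediaStream can be routed into the processing graph and back out as a new MediaStream:
<script>
// Minimal sketch: process an incoming MediaStream and obtain a new outgoing MediaStream.
// 'inputStream' is a MediaStream obtained elsewhere (e.g. from getUserMedia or a peer).
var source = context.createMediaStreamSource(inputStream);
var gain = context.createGainNode();
gain.gain.value = 0.5; // arbitrary processing step: attenuate by half
var destination = context.createMediaStreamDestination();
source.connect(gain);
gain.connect(destination);
// destination.stream is the resulting MediaStream, ready to be sent to a peer or recorded.
</script>
The same pattern, a source node in and a destination node out, is what the longer examples below build on.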
<video src="foo.webm" id="v" controls></video>
<script>
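// Play a video, routing its audio through a filter effect before it reaches the speakers.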
var audioSource = context.createMediaElementSource(document.getElementById("v"));
var filter = context.createBiquadFilter();
audioSource.connect(filter);
filter.connect(context.destination);
</script>
<video src="foo.webm" id="v"></video>
<audio src="back.webm" id="back"></audio>
<script>
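// Play a video whose audio is processed by a JavaScript effect node, mixed with background music.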
var videoSource = context.createMediaElementSource(document.getElementById("v"));
var audioSource = context.createMediaElementSource(document.getElementById("back"));
var effect = context.createJavaScriptNode(2048, 2, 2); // 2048-sample buffer, stereo in and out
effect.onaudioprocess = customAudioProcessor;
videoSource.connect(effect);
effect.connect(context.destination);
audioSource.connect(context.destination);
function startPlaying() {
document.getElementById("v").play();
document.getElementById("back").play();
}
</script>
<script>
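// Capture microphone input, apply a filter effect, and stream the result to a peer.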
navigator.getUserMedia('audio', gotAudio);
function gotAudio(stream) {
var microphone = context.createMediaStreamSource(stream);
var filter = context.createBiquadFilter();
var peer = context.createMediaStreamDestination();
microphone.connect(filter);
filter.connect(peer);
peerConnection.addStream(peer.stream);
}
</script>
<canvas id="c"></canvas>
<script>
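// Capture microphone input, visualize it on the canvas, and record and stream the raw input to a peer.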
navigator.getUserMedia('audio', gotAudio);
var streamRecorder;
function gotAudio(stream) {
var microphone = context.createMediaStreamSource(stream);
var analyser = context.createAnalyser();
microphone.connect(analyser);
analyser.connect(context.destination);
requestAnimationFrame(drawAnimation);
streamRecorder = stream.record();
peerConnection.addStream(stream);
}
</script>
<canvas id="c"></canvas>
<audio src="back.webm" id="back"></audio>
<script>
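// Capture microphone input, visualize it, mix in background music, then record the mix and stream it to a peer.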
navigator.getUserMedia('audio', gotAudio);
var streamRecorder;
function gotAudio(stream) {
var microphone = context.createMediaStreamSource(stream);
var backgroundMusic = context.createMediaElementSource(document.getElementById("back"));
var analyser = context.createAnalyser();
var mixedOutput = context.createMediaStreamDestination();
microphone.connect(analyser);
analyser.connect(mixedOutput);
backgroundMusic.connect(mixedOutput);
requestAnimationFrame(drawAnimation);
streamRecorder = mixedOutput.stream.record();
peerConnection.addStream(mixedOutput.stream);
}
</script>
<audio id="out" autoplay></audio>
<script>
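// Receive an audio stream from a peer and spatialize it before playing it out locally.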
peerConnection.onaddstream = function(event) {
var peerInput = context.createMediaStreamSource(event.stream);
var panner = context.createPanner();
panner.setPosition(x, y, z); // x, y, z: the desired spatial position for this peer, computed elsewhere
peerInput.connect(panner);
panner.connect(context.destination);
};
</script>
<audio src="in1.webm" id="in1" preload></audio>
<audio src="in2.webm" id="in2"></audio>
<script>
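// Chain playback from one input stream to the next: when the first ends, start the second.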
var in1 = document.getElementById("in1");
var in2 = document.getElementById("in2");
in1.onloadeddata = function() {
in1.onended = function() { in2.play(); };
in1.play();
};
</script>
<audio src="in1.webm" id="in1" preload></audio>
<audio src="in2.webm" id="in2"></audio>
<script>
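// Switch playback from one input stream to another at a scheduled time.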
var in1 = document.getElementById("in1");
var in2 = document.getElementById("in2");
var source1 = context.createMediaElementSource(in1);
var source2 = context.createMediaElementSource(in2);
source1.connect(context.destination);
source2.connect(context.destination);
in1.play(); // start the first media element playing (the source nodes themselves are not playable)
function switchStreams() {
in2.currentTime = in1.currentTime + 10; // arbitrary, but we should be able to complete the seek within this time
var switchTime = context.currentTime + 10;
// Note: pause() and play() taking a scheduled time are assumed here; standard media elements accept no argument.
in1.pause(switchTime);
in2.play(switchTime);
}
</script>
<script>
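// Synthesize audio samples directly in JavaScript.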
var processor = context.createJavaScriptNode(2048, 2, 2);
processor.onaudioprocess = customAudioProcess;
processor.connect(context.destination);
</script>
<script>
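// Trigger a sound sample to play as soon as possible.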
var source = context.createBufferSource();
source.buffer = kickDrum909;
source.connect(context.destination);
source.noteOn(0);
</script>
<script>
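// Trigger a sound sample through a convolution reverb, scheduled to play five seconds from now.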
var source = context.createBufferSource();
source.buffer = kickDrum909;
var effect = context.createConvolver();
effect.buffer = immenseCathedral;
source.connect(effect);
effect.connect(context.destination);
source.noteOn(context.currentTime + 5);
</script>
<script>
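// Capture video from the camera and hand the stream to a worker for analysis (e.g. face recognition).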
navigator.getUserMedia('video', gotVideo);
function gotVideo(stream) {
stream.createWorkerProcessor(new Worker("face-recognizer.js"));
}
</script>
<script>
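// Capture video from the camera, record it, and upload the recording.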
navigator.getUserMedia('video', gotVideo);
var streamRecorder;
function gotVideo(stream) {
streamRecorder = stream.record();
}
function stopRecording() {
streamRecorder.getRecordedData(gotData);
}
function gotData(blob) {
var x = new XMLHttpRequest();
x.open('POST', 'uploadMessage');
x.send(blob);
}
</script>
<canvas width="640" height="480" id="c"></canvas>
<script>
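// Record the contents of a canvas as it is drawn and upload the recording.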
var canvas = document.getElementById("c");
var streamRecorder = canvas.stream.record();
function stopRecording() {
streamRecorder.getRecordedData(gotData);
}
function gotData(blob) {
var x = new XMLHttpRequest();
x.open('POST', 'uploadMessage');
x.send(blob);
}
var frame = 0;
function updateCanvas() {
var ctx = canvas.getContext("2d");
ctx.clearRect(0, 0, 640, 480);
ctx.fillText("Frame " + frame, 0, 200);
++frame;
}
setInterval(updateCanvas, 30);
</script>