實時更改兩個音訊源
此示例顯示如何使用兩個音訊源,並根據另一個更改其中一個音訊源。在這種情況下,我們建立一個音訊 Ducker,如果輔助音軌產生聲音,它將降低主音軌的音量。
ScriptProcessorNode 將定期事件傳送到其 audioprocess 處理程式。在這個連結到輔助音訊源的處理程式中,我們計算音訊的響度,並用它來改變主音訊源上的動態壓縮器。然後將兩者傳送到使用者的揚聲器/耳機。結果是當在次要音訊軌道中檢測到聲音時,主要音訊軌道中的音量變化非常突然。我們可以通過使用平均值來使這更平滑,並且在檢測到輔助音訊之前使用延遲線來改變音量,但是在該示例中該過程應該是清楚的。
//The DuckingNode heavily compresses the primary source whenever the secondary
//source produces sound ("ducking"). Route the primary source into
//`primaryInput` and the secondary source into `secondaryInput`.
//NOTE(review): ScriptProcessorNode is deprecated in favour of AudioWorkletNode;
//it is kept here because the surrounding example is built around it.
class DuckingNode {
  /**
   * @param {AudioContext} context - audio context used to create the internal
   *   DynamicsCompressorNode and ScriptProcessorNode.
   */
  constructor(context) {
    const blocksize = 2048;       // samples per processing quantum
    const normalThreshold = -50;  // compressor threshold (dB) while fully ducking
    const duckThreshold = 0.15;   // RMS level at/above which full ducking applies

    //Creating nodes
    this.compressor = context.createDynamicsCompressor();
    this.processor = context.createScriptProcessor(blocksize, 2, 2);

    //Configuring the compressor for aggressive gain reduction.
    this.compressor.threshold.value = normalThreshold;
    this.compressor.knee.value = 0;
    this.compressor.ratio.value = 12;
    // Fix: the original assigned to `compressor.reduction`, but `reduction` is
    // a read-only attribute that *reports* the current gain reduction, so the
    // assignment was a silent no-op and has been removed.
    this.compressor.attack.value = 0;
    this.compressor.release.value = 1.5;

    // Arrow function keeps `this` bound to the DuckingNode instance
    // (replaces the original `let self = this` workaround).
    this.processor.onaudioprocess = (audioProcessingEvent) => {
      const inputBuffer = audioProcessingEvent.inputBuffer;
      const outputBuffer = audioProcessingEvent.outputBuffer;
      const len = blocksize * outputBuffer.numberOfChannels;
      let total = 0.0;

      for (let channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
        const inputData = inputBuffer.getChannelData(channel);
        const outputData = outputBuffer.getChannelData(channel);
        for (let sample = 0; sample < inputBuffer.length; sample++) {
          // Pass the secondary audio through unchanged.
          outputData[sample] = inputData[sample];
          // Fix: accumulate squared samples. The original summed absolute
          // values, so sqrt(total/len) was not the root mean square its
          // comment promised.
          total += inputData[sample] * inputData[sample];
        }
      }

      //Root mean square of the secondary signal over this block.
      const rms = Math.sqrt(total / len);

      // Loud secondary (rms >= duckThreshold): threshold stays at
      // normalThreshold, i.e. heavy compression of the primary. Quiet
      // secondary: the (negative) shortfall times 5 * normalThreshold raises
      // the threshold toward 0 dB, easing the compression off.
      this.compressor.threshold.value =
        normalThreshold + Math.min(rms - duckThreshold, 0) * 5 * normalThreshold;
    };
  }

  //AudioNode the primary (ducked) source should connect to.
  get primaryInput () {
    return this.compressor;
  }

  //AudioNode the secondary (controlling) source should connect to.
  get secondaryInput () {
    return this.processor;
  }

  //Connect the ducked primary signal onward to `node`.
  connectPrimary(node) {
    this.compressor.connect(node);
  }

  //Connect the passed-through secondary signal onward to `node`.
  connectSecondary(node) {
    this.processor.connect(node);
  }
}
// Create the audio context, falling back to the old WebKit-prefixed name.
const audioContext = new (window.AudioContext || window.webkitAudioContext)();

// Grab the two <audio> elements on the page. Ideally they carry the autoplay
// attribute so both tracks start together.
const musicElement = document.getElementById("music");
const voiceoverElement = document.getElementById("voiceover");

// Wrap each element in a MediaElementAudioSourceNode...
const musicSourceNode = audioContext.createMediaElementSource(musicElement);
const voiceoverSourceNode = audioContext.createMediaElementSource(voiceoverElement);

// ...and route both through the ducker: the voiceover drives how hard the
// music is compressed, and both signals end up at the speakers/headphones.
const duckingNode = new DuckingNode(audioContext);
musicSourceNode.connect(duckingNode.primaryInput);
voiceoverSourceNode.connect(duckingNode.secondaryInput);
duckingNode.connectPrimary(audioContext.destination);
duckingNode.connectSecondary(audioContext.destination);