跳到主要内容

createScriptProcessor

2024年02月05日
柏拉文
越努力,越幸运

一、认识


audioContext.createScriptProcessor() 创建一个 ScriptProcessorNode 用于通过 JavaScript 直接处理音频。

二、语法


// Create a ScriptProcessorNode for direct JavaScript audio processing.
const audioCtx = new AudioContext();
// Fix: the original assigned `myScriptProcessor` without a declaration,
// creating an implicit global; declare it with `const`.
const myScriptProcessor = audioCtx.createScriptProcessor(
  bufferSize,            // one of 256..16384 (power of two), or 0 for auto
  numberOfInputChannels, // 1..32, default 2
  numberOfOutputChannels, // 1..32, default 2
);
  • bufferSize: 缓冲区大小,以样本帧为单位。具体来讲,缓冲区大小必须是下面这些值当中的某一个:256, 512, 1024, 2048, 4096, 8192, 16384。如果不传,或者参数为 0,则取当前环境最合适的缓冲区大小,取值为 2 的幂次方的一个常数,在该 node 的整个生命周期中都不变。该取值控制着 audioprocess 事件被分派的频率,以及每一次调用多少样本帧被处理。较低的 bufferSize 将导致一定的延迟;较高的 bufferSize 就要注意避免音频的崩溃和故障。建议不要给定具体的缓冲区大小,让系统自己选一个合适的值来平衡延迟和音频质量。

  • numberOfInputChannels: 值为整数,用于指定输入 node 的声道的数量,默认值是 2,最高能取 32.

  • numberOfOutputChannels: 值为整数,用于指定输出 node 的声道的数量,默认值是 2,最高能取 32.

三、场景


3.1 计算音频音量

// --- Volume-meter demo setup ---
// Canvas used to draw the level bar (110×8 px).
const audioCanvas = document.getElementById('audio-canvas');
audioCanvas.width = 110;
audioCanvas.height = 8;
const audioCanvasCtx = audioCanvas.getContext('2d');

// Capture the microphone and wrap it in a Web Audio source node.
// NOTE(review): top-level await — this snippet must run in a module (or be
// wrapped in an async function) and throws if microphone access is denied.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const audioContext = new AudioContext();
const source = audioContext.createMediaStreamSource(stream);

// Map a 0–1 volume level to a traffic-light colour:
// red when clipping (>0.9) or nearly silent (<0.3), yellow in the
// warning bands, green in the 0.4–0.6 sweet spot.
function getColor(volumePercent) {
  const isCritical = volumePercent > 0.9 || volumePercent < 0.3;
  if (isCritical) {
    return '#B80000';
  }
  const isWarning = volumePercent > 0.6 || volumePercent < 0.4;
  return isWarning ? '#FAB400' : '#21A564';
}

// Render a horizontal volume bar whose length is `volumePercent` of the
// canvas width, fading toward transparency at its tip.
function drawVolume(volumePercent) {
  audioCanvasCtx.clearRect(0, 0, audioCanvas.width, audioCanvas.height);

  const barEnd = volumePercent * audioCanvas.width;
  const barColor = getColor(volumePercent);

  audioCanvasCtx.lineWidth = 20;
  const gradient = audioCanvasCtx.createLinearGradient(0, 0, barEnd, 0);
  gradient.addColorStop(0, barColor);
  gradient.addColorStop(0.8, `${barColor}88`); // ~53% alpha
  gradient.addColorStop(1, `${barColor}00`);   // fully transparent tip

  audioCanvasCtx.beginPath();
  audioCanvasCtx.moveTo(0, 0);
  audioCanvasCtx.lineTo(barEnd, 0);
  audioCanvasCtx.strokeStyle = gradient;
  audioCanvasCtx.stroke();
  audioCanvasCtx.closePath();
}

// Root-mean-square of a sample window — a measure of signal energy.
// Returns NaN for an empty window (0 / 0).
function getRMS(samples) {
  let sumOfSquares = 0;
  for (const sample of samples) {
    sumOfSquares += sample * sample;
  }
  return Math.sqrt(sumOfSquares / samples.length);
}

// Convert a linear gain (e.g. an RMS value) to decibels: dB = 20·log10(gain).
// A gain of 0 yields -Infinity; a gain of 1 yields 0 dB.
function rmsToDb(gain) {
  const DB_PER_DECADE = 20;
  return DB_PER_DECADE * Math.log10(gain);
}

// Map a dB value onto [0, 1] for display: minDb (-80 dB) maps to 0,
// 0 dB (full-scale RMS) maps to 1, values outside are clamped.
//
// Fix: the original used Math.abs(dbValue), making the mapping
// non-monotonic for dbValue in (0, 1) — e.g. +0.5 dB produced a smaller
// percentage than 0 dB. The clamped linear form below is identical to the
// original for dbValue <= 0 and simply pins anything louder to 1.
function getVolumePercent(dbValue) {
  const minDb = -80;
  const normalized = (dbValue - minDb) / -minDb; // linear in dB
  return Math.min(1, Math.max(0, normalized));
}

// Wire the microphone source through a ScriptProcessorNode so each
// 1024-frame buffer can be measured and drawn as a volume meter.
// NOTE(review): ScriptProcessorNode is deprecated in favour of
// AudioWorkletNode; also, this function is `async` but awaits nothing.
async function audioRecorder() {
  // 1024-frame buffer, 1 input channel, 1 output channel.
  const scriptProcessor = audioContext.createScriptProcessor(1024, 1, 1);
  source.connect(scriptProcessor);
  // The processor is routed to the destination so the graph keeps running.
  scriptProcessor.connect(audioContext.destination);

  // Fires once per buffer: RMS → dB → 0..1 percentage → canvas bar.
  scriptProcessor.onaudioprocess = event => {
    const samples = event.inputBuffer.getChannelData(0);
    const rms = getRMS(samples);
    const db = rmsToDb(rms);
    const volumePercent = getVolumePercent(db);
    drawVolume(volumePercent);
  };
}

// Start metering. Fire-and-forget: the returned promise is not awaited
// and any rejection is unhandled.
audioRecorder();

3.2 获取音频样本

// --- Audio-sampling demo setup (standalone, duplicates the meter setup) ---
// Capture the microphone and wrap it in a Web Audio source node.
// NOTE(review): top-level await — this snippet must run in a module (or be
// wrapped in an async function) and throws if microphone access is denied.
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const audioContext = new AudioContext();
const source = audioContext.createMediaStreamSource(stream);

/**
 * Resample a block of audio to 16 kHz using linear interpolation.
 *
 * @param {ArrayLike<number>} audioData - input PCM samples (float).
 * @param {number} [sampleRate=44100] - sample rate of the input.
 * @returns {Float32Array} resampled data (length rounded to nearest frame).
 */
function to16kHz(audioData, sampleRate = 44100) {
  const data = new Float32Array(audioData);
  const fitCount = Math.round(data.length * (16000 / sampleRate));
  const newData = new Float32Array(fitCount);

  // Degenerate outputs: nothing to interpolate between. (The original
  // divided by fitCount - 1, yielding Infinity/NaN here.)
  if (fitCount === 0) return newData;
  if (fitCount === 1) {
    newData[0] = data[0];
    return newData;
  }

  // Map each output index back onto the input timeline and blend the two
  // surrounding input samples. First and last samples are copied verbatim.
  const springFactor = (data.length - 1) / (fitCount - 1);
  newData[0] = data[0];
  for (let i = 1; i < fitCount - 1; i++) {
    const position = i * springFactor;
    // Fix: the original called Math.floor(...).toFixed() /
    // Math.ceil(...).toFixed(), producing *strings* that were used as
    // array indices and in arithmetic; use numbers directly.
    const before = Math.floor(position);
    const after = Math.ceil(position);
    const atPoint = position - before;
    newData[i] = data[before] + (data[after] - data[before]) * atPoint;
  }
  newData[fitCount - 1] = data[data.length - 1];
  return newData;
}

/**
 * Convert float samples in [-1, 1] to 16-bit little-endian signed PCM.
 * Values are clamped first; negatives scale by 0x8000, positives by 0x7fff.
 *
 * @param {ArrayLike<number>} input - float samples.
 * @returns {DataView} view over the packed 16-bit PCM buffer.
 */
function to16BitPCM(input) {
  const BYTES_PER_SAMPLE = 2;
  const view = new DataView(new ArrayBuffer(input.length * BYTES_PER_SAMPLE));
  for (let i = 0; i < input.length; i++) {
    const clamped = Math.max(-1, Math.min(1, input[i]));
    const scaled = clamped < 0 ? clamped * 0x8000 : clamped * 0x7fff;
    view.setInt16(i * BYTES_PER_SAMPLE, scaled, true); // little-endian
  }
  return view;
}

// Wire the microphone source through a ScriptProcessorNode and, for each
// 1024-frame buffer, downsample to 16 kHz and pack as 16-bit PCM.
// NOTE(review): ScriptProcessorNode is deprecated in favour of
// AudioWorkletNode; also, this function is `async` but awaits nothing.
async function audioRecorder() {
  // 1024-frame buffer, 1 input channel, 1 output channel.
  const scriptProcessor = audioContext.createScriptProcessor(1024, 1, 1);
  source.connect(scriptProcessor);
  // The processor is routed to the destination so the graph keeps running.
  scriptProcessor.connect(audioContext.destination);

  // Fires once per buffer: mono samples → 16 kHz floats → 16-bit PCM view.
  scriptProcessor.onaudioprocess = event => {
    const samples = event.inputBuffer.getChannelData(0);
    const output = to16kHz(samples);
    const audioBuffer = to16BitPCM(output);
    console.log("audioBuffer: ", audioBuffer);
  }
}

// Start capturing. Fire-and-forget: the returned promise is not awaited
// and any rejection is unhandled.
audioRecorder();