Web Audio API提供了强大的音频处理能力,可以在Web应用中实现音频播放、合成、分析和效果处理。本章将介绍Web Audio API的基本概念和使用方法。
// Create the shared AudioContext; the webkit prefix covers older Safari.
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
// Basic context properties: sample rate (Hz), a monotonically increasing
// clock in seconds, and lifecycle state ('suspended'|'running'|'closed').
console.log('采样率:', audioContext.sampleRate);
console.log('当前时间:', audioContext.currentTime);
console.log('状态:', audioContext.state);
// Resume the shared AudioContext if the browser created it suspended.
// Autoplay policies require a user gesture before audio may start, so
// this is wired to the first interaction (see the click listener below).
async function initAudio() {
  if (audioContext.state !== 'suspended') return;
  await audioContext.resume();
}
// Run initAudio on the first click only ({ once: true } auto-removes it).
document.addEventListener('click', initAudio, { once: true });
// Permanently release the context's audio resources. NOTE(review): close()
// is irreversible — executing this here would make every later snippet in
// this file inoperative; each section is meant as a standalone example.
audioContext.close().then(() => {
  console.log('音频上下文已关闭');
});
// Reference table of the main Web Audio node categories and the concrete
// node types in each (descriptive data only; not used by the demos below).
const audioNodes = {
  // nodes that produce audio
  source: {
    description: '音频源节点',
    types: ['OscillatorNode', 'AudioBufferSourceNode', 'MediaElementAudioSourceNode', 'MediaStreamAudioSourceNode']
  },
  // nodes that transform audio
  effect: {
    description: '效果节点',
    types: ['GainNode', 'BiquadFilterNode', 'ConvolverNode', 'DelayNode', 'DynamicsCompressorNode']
  },
  // the context's final output
  destination: {
    description: '输出节点',
    types: ['AudioDestinationNode']
  },
  // nodes that expose data without altering the signal
  analyzer: {
    description: '分析节点',
    types: ['AnalyserNode']
  }
};
// Oscillator basics. The repeated assignments below enumerate the four
// built-in waveforms; only the last one ('triangle') remains in effect.
const oscillator = audioContext.createOscillator();
oscillator.type = 'sine';
oscillator.type = 'square';
oscillator.type = 'sawtooth';
oscillator.type = 'triangle';
// Three ways to control pitch: direct set, scheduled set, and a ramp.
oscillator.frequency.value = 440;
oscillator.frequency.setValueAtTime(440, audioContext.currentTime);
oscillator.frequency.linearRampToValueAtTime(880, audioContext.currentTime + 1);
// Fine pitch offset in cents relative to `frequency`.
oscillator.detune.value = 0;
oscillator.connect(audioContext.destination);
// start()/stop() shown with and without an explicit time. NOTE(review): a
// source node may be started and stopped at most once — calling start() or
// stop() twice as written here throws; each pair illustrates the two call
// forms, not a sequence to execute verbatim.
oscillator.start();
oscillator.start(audioContext.currentTime + 1);
oscillator.stop();
oscillator.stop(audioContext.currentTime + 2);
/**
 * Play a single decaying tone through the shared AudioContext.
 * @param {number} frequency - Pitch in Hz.
 * @param {number} duration - Note length in seconds.
 * @param {string} [type='sine'] - Oscillator waveform type.
 */
function playNote(frequency, duration, type = 'sine') {
  const now = audioContext.currentTime;
  const voice = audioContext.createOscillator();
  const envelope = audioContext.createGain();

  voice.type = type;
  voice.frequency.value = frequency;

  // Start at a moderate level and decay exponentially to near-silence.
  envelope.gain.setValueAtTime(0.3, now);
  envelope.gain.exponentialRampToValueAtTime(0.01, now + duration);

  voice.connect(envelope);
  envelope.connect(audioContext.destination);
  voice.start();
  voice.stop(now + duration);
}
// All three notes start immediately (playNote calls start() with no
// offset), so they sound together as a chord rather than a melody.
playNote(440, 0.5);
playNote(523.25, 0.5);
playNote(659.25, 0.5);
/**
 * Fetch an audio file and decode it into an AudioBuffer.
 * @param {string} url - Location of the audio resource.
 * @returns {Promise<AudioBuffer>} Decoded PCM data.
 * @throws {Error} On HTTP failure; decodeAudioData rejects on undecodable data.
 */
async function loadAudioFile(url) {
  const response = await fetch(url);
  // fetch() only rejects on network errors; surface HTTP errors explicitly.
  if (!response.ok) {
    throw new Error(`Failed to load audio file: ${response.status} ${response.statusText}`);
  }
  const arrayBuffer = await response.arrayBuffer();
  return audioContext.decodeAudioData(arrayBuffer);
}
/**
 * Play a decoded AudioBuffer once from the beginning.
 * @param {AudioBuffer} audioBuffer - Previously decoded audio data.
 * @returns {AudioBufferSourceNode} The single-use source node, so the
 *   caller can stop it or adjust loop/playbackRate settings.
 */
function playAudioBuffer(audioBuffer) {
  const node = audioContext.createBufferSource();
  node.buffer = audioBuffer;
  node.connect(audioContext.destination);
  node.start();
  return node;
}
// Load and start a sample, then configure looping over the whole buffer.
// NOTE: top-level await requires this file to be an ES module.
const buffer = await loadAudioFile('sound.mp3');
const source = playAudioBuffer(buffer);
source.loop = true;
source.loopStart = 0;
// BUG FIX: the original read `audioBuffer.duration`, but no `audioBuffer`
// variable exists at this scope — the decoded data is held in `buffer`.
source.loopEnd = buffer.duration;
source.playbackRate.value = 1.0;
// Route an existing <audio> element through the graph via a
// MediaElementAudioSourceNode. NOTE(review): `source` and `gainNode` are
// also declared with const in other sections of this file; the sections
// are standalone snippets and would collide if run as one script.
const audioElement = document.querySelector('audio');
const source = audioContext.createMediaElementSource(audioElement);
const gainNode = audioContext.createGain();
source.connect(gainNode);
gainNode.connect(audioContext.destination);
audioElement.play();
audioElement.addEventListener('ended', () => {
  console.log('音频播放结束');
});
/**
 * Request microphone access and wrap the stream in a Web Audio source node.
 * @returns {Promise<{stream: MediaStream, source: MediaStreamAudioSourceNode}>}
 * @throws {DOMException} If the user denies permission or no device exists.
 */
async function getMicrophoneStream() {
  const micStream = await navigator.mediaDevices.getUserMedia({ audio: true });
  return {
    stream: micStream,
    source: audioContext.createMediaStreamSource(micStream),
  };
}
// Capture the microphone and log its frequency spectrum every frame.
// NOTE: top-level await requires an ES module context.
const { stream, source } = await getMicrophoneStream();
const analyser = audioContext.createAnalyser();
source.connect(analyser);
// frequencyBinCount is fftSize / 2 — one byte per frequency bin.
const dataArray = new Uint8Array(analyser.frequencyBinCount);
// Refresh dataArray once per animation frame.
function visualize() {
  analyser.getByteFrequencyData(dataArray);
  console.log('音频数据:', dataArray);
  requestAnimationFrame(visualize);
}
visualize();
// GainNode volume control: direct assignment vs. scheduled automation.
const gainNode = audioContext.createGain();
gainNode.gain.value = 0.5;
gainNode.gain.setValueAtTime(0.5, audioContext.currentTime);
// Two ramp flavors shown for comparison. NOTE(review): scheduling both a
// linear and an exponential ramp to the same end time on one param is for
// illustration only — in real code pick one.
gainNode.gain.linearRampToValueAtTime(0, audioContext.currentTime + 1);
gainNode.gain.exponentialRampToValueAtTime(0.01, audioContext.currentTime + 1);
/**
 * Ramp a gain node linearly from silence to unity over `duration` seconds.
 * @param {GainNode} gainNode - Node whose gain is automated.
 * @param {number} duration - Fade length in seconds.
 */
function fadeIn(gainNode, duration) {
  const startTime = audioContext.currentTime;
  gainNode.gain.setValueAtTime(0, startTime);
  gainNode.gain.linearRampToValueAtTime(1, startTime + duration);
}
/**
 * Fade a gain node from its current level down to near-silence.
 * @param {GainNode} gainNode - Node whose gain is automated.
 * @param {number} duration - Fade length in seconds.
 */
function fadeOut(gainNode, duration) {
  const now = audioContext.currentTime;
  // An exponential ramp starting from 0 never reaches its target (the curve
  // is multiplicative), so clamp the start to a small positive floor.
  const startLevel = Math.max(gainNode.gain.value, 0.0001);
  gainNode.gain.setValueAtTime(startLevel, now);
  gainNode.gain.exponentialRampToValueAtTime(0.01, now + duration);
}
// BiquadFilterNode basics. The repeated assignments enumerate the filter
// modes; only the last one ('highshelf') remains in effect.
const filter = audioContext.createBiquadFilter();
filter.type = 'lowpass';
filter.type = 'highpass';
filter.type = 'bandpass';
filter.type = 'notch';
filter.type = 'peaking';
filter.type = 'lowshelf';
filter.type = 'highshelf';
// Cutoff/center frequency in Hz, resonance (Q), and gain in dB (gain
// applies to the peaking/shelf types).
filter.frequency.value = 1000;
filter.Q.value = 1;
filter.gain.value = 0;
// Band-pass by chaining: low-pass at 2 kHz into high-pass at 200 Hz keeps
// roughly the 200 Hz – 2 kHz band.
const lowpass = audioContext.createBiquadFilter();
lowpass.type = 'lowpass';
lowpass.frequency.value = 2000;
const highpass = audioContext.createBiquadFilter();
highpass.type = 'highpass';
highpass.frequency.value = 200;
// source -> lowpass -> highpass -> output
source.connect(lowpass);
lowpass.connect(highpass);
highpass.connect(audioContext.destination);
// Delay with feedback: the delayed signal feeds back into the delay
// through a gain below 1, producing echoes that decay on each repeat.
const delayNode = audioContext.createDelay(5.0); // max delay time: 5 s
delayNode.delayTime.value = 0.5; // spacing between echoes, seconds
const feedbackGain = audioContext.createGain();
feedbackGain.gain.value = 0.5; // echo level drops by half each repeat
source.connect(delayNode);
delayNode.connect(audioContext.destination);
// feedback loop: delay -> gain -> back into delay
delayNode.connect(feedbackGain);
feedbackGain.connect(delayNode);
// Dry/wet mix: the unprocessed (dry) and delayed (wet) signals are summed
// at the destination with a 70/30 balance.
const dryGain = audioContext.createGain();
const wetGain = audioContext.createGain();
source.connect(dryGain);
// NOTE(review): `source` was already connected to delayNode above; a
// repeated connect() between the same pair of nodes should add nothing —
// verify against the spec if this section is run as written.
source.connect(delayNode);
delayNode.connect(wetGain);
dryGain.connect(audioContext.destination);
wetGain.connect(audioContext.destination);
dryGain.gain.value = 0.7;
wetGain.gain.value = 0.3;
// Dynamics compressor with fairly aggressive settings.
const compressor = audioContext.createDynamicsCompressor();
compressor.threshold.value = -24; // dB level where compression begins
compressor.knee.value = 30; // dB width of the soft knee
compressor.ratio.value = 12; // 12:1 reduction above threshold
compressor.attack.value = 0.003; // seconds to engage
compressor.release.value = 0.25; // seconds to disengage
source.connect(compressor);
compressor.connect(audioContext.destination);
/**
 * Build a ConvolverNode loaded with an impulse response, for reverb.
 * @param {string} url - Location of the impulse-response audio file.
 * @returns {Promise<ConvolverNode>} Convolver ready to be patched in.
 * @throws {Error} On HTTP failure; decodeAudioData rejects on undecodable data.
 */
async function createReverb(url) {
  const response = await fetch(url);
  // fetch() only rejects on network errors; surface HTTP errors explicitly
  // (consistent with loadAudioFile).
  if (!response.ok) {
    throw new Error(`Failed to load impulse response: ${response.status} ${response.statusText}`);
  }
  const arrayBuffer = await response.arrayBuffer();
  const impulseBuffer = await audioContext.decodeAudioData(arrayBuffer);
  const convolver = audioContext.createConvolver();
  convolver.buffer = impulseBuffer;
  return convolver;
}
// Reverb dry/wet routing. NOTE(review): `dryGain`/`wetGain` are also
// declared with const in the delay example above — the sections are
// standalone snippets. Both gains are left at their default of 1 here,
// so dry and wet paths play at full level.
const reverb = await createReverb('impulse-response.wav');
const dryGain = audioContext.createGain();
const wetGain = audioContext.createGain();
source.connect(dryGain);
source.connect(reverb);
reverb.connect(wetGain);
dryGain.connect(audioContext.destination);
wetGain.connect(audioContext.destination);
// Analyser setup shared by the visualizations below.
const analyser = audioContext.createAnalyser();
analyser.fftSize = 2048; // window size; frequency bins = fftSize / 2
analyser.smoothingTimeConstant = 0.8; // temporal smoothing (0 = none)
const bufferLength = analyser.frequencyBinCount;
const frequencyData = new Uint8Array(bufferLength);
// NOTE(review): time-domain reads can fill up to fftSize samples; sizing
// these arrays at frequencyBinCount (fftSize / 2) captures only the first
// half of the window — confirm whether that is intended.
const timeData = new Uint8Array(bufferLength);
const floatTimeData = new Float32Array(bufferLength);
// Pull all three data views once per animation frame.
function analyze() {
  analyser.getByteFrequencyData(frequencyData);
  analyser.getByteTimeDomainData(timeData);
  analyser.getFloatTimeDomainData(floatTimeData);
  requestAnimationFrame(analyze);
}
source.connect(analyser);
// The analyser passes audio through unchanged, so it can sit inline.
analyser.connect(audioContext.destination);
// 2D canvas used by the frequency-bar and waveform renderers below.
const canvas = document.getElementById('visualizer');
const ctx = canvas.getContext('2d');
// Render the frequency spectrum as colored vertical bars, re-scheduling
// itself once per animation frame.
function drawFrequency() {
  requestAnimationFrame(drawFrequency);
  analyser.getByteFrequencyData(frequencyData);

  // Clear to black before drawing the new frame.
  ctx.fillStyle = 'rgb(0, 0, 0)';
  ctx.fillRect(0, 0, canvas.width, canvas.height);

  const barWidth = (canvas.width / bufferLength) * 2.5;
  let xPos = 0;
  for (let bin = 0; bin < bufferLength; bin++) {
    // Byte values span 0-255; scale each bar to the canvas height.
    const barHeight = (frequencyData[bin] / 255) * canvas.height;
    // Sweep the hue across the spectrum from low to high frequencies.
    ctx.fillStyle = `hsl(${(bin / bufferLength) * 360}, 100%, 50%)`;
    ctx.fillRect(xPos, canvas.height - barHeight, barWidth, barHeight);
    xPos += barWidth + 1;
  }
}
// Render the time-domain waveform as a green oscilloscope-style trace,
// re-scheduling itself once per animation frame.
function drawWaveform() {
  requestAnimationFrame(drawWaveform);
  analyser.getByteTimeDomainData(timeData);

  // Clear to black, then configure the trace stroke.
  ctx.fillStyle = 'rgb(0, 0, 0)';
  ctx.fillRect(0, 0, canvas.width, canvas.height);
  ctx.lineWidth = 2;
  ctx.strokeStyle = 'rgb(0, 255, 0)';
  ctx.beginPath();

  const sliceWidth = canvas.width / bufferLength;
  let xPos = 0;
  for (let i = 0; i < bufferLength; i++) {
    // Bytes center on 128 (silence); normalize so 128 maps to mid-canvas.
    const sampleY = ((timeData[i] / 128.0) * canvas.height) / 2;
    if (i === 0) {
      ctx.moveTo(xPos, sampleY);
    } else {
      ctx.lineTo(xPos, sampleY);
    }
    xPos += sliceWidth;
  }
  ctx.lineTo(canvas.width, canvas.height / 2);
  ctx.stroke();
}
/**
 * Measure the current signal level from the shared analyser.
 * @returns {{rms: number, db: number}} Root-mean-square amplitude and its
 *   decibel equivalent (-Infinity for digital silence).
 */
function getVolume() {
  analyser.getFloatTimeDomainData(floatTimeData);
  const sumOfSquares = floatTimeData.reduce(
    (acc, sample) => acc + sample * sample,
    0
  );
  const rms = Math.sqrt(sumOfSquares / floatTimeData.length);
  return { rms, db: 20 * Math.log10(rms) };
}
/**
 * Report whether the instantaneous RMS level exceeds a threshold.
 * @param {number} [threshold=0.5] - RMS amplitude that counts as a clap.
 * @returns {boolean} True when a clap-like transient was detected.
 */
function detectClap(threshold = 0.5) {
  const { rms } = getVolume();
  if (rms <= threshold) return false;
  console.log('检测到拍手!');
  return true;
}
/**
 * Measures the RMS level of the signal passing through an AnalyserNode.
 */
class VolumeMeter {
  /**
   * @param {AnalyserNode} analyser - Node to sample time-domain data from.
   */
  constructor(analyser) {
    this.analyser = analyser;
    // One float per sample in the analyser's FFT window.
    this.dataArray = new Float32Array(analyser.fftSize);
  }

  /**
   * @returns {number} Root-mean-square amplitude of the current window.
   */
  getLevel() {
    this.analyser.getFloatTimeDomainData(this.dataArray);
    const energy = this.dataArray.reduce((acc, s) => acc + s * s, 0);
    return Math.sqrt(energy / this.dataArray.length);
  }

  /**
   * @returns {number} Level in decibels; -Infinity for digital silence.
   */
  getDecibels() {
    const level = this.getLevel();
    return level === 0 ? -Infinity : 20 * Math.log10(level);
  }
}
/**
 * Polyphonic oscillator synth behind a shared master gain.
 * Keeps one oscillator/gain pair alive per currently sounding frequency.
 */
class Synthesizer {
  /**
   * @param {AudioContext} audioContext - Context used to create nodes.
   */
  constructor(audioContext) {
    this.audioContext = audioContext;
    // frequency -> { osc, gain } for every active note.
    this.oscillators = new Map();
    this.masterGain = audioContext.createGain();
    this.masterGain.connect(audioContext.destination);
    this.masterGain.gain.value = 0.3;
  }

  /**
   * Start a note; no-op if the frequency is already sounding.
   * @param {number} frequency - Pitch in Hz.
   * @param {string} [type='sawtooth'] - Oscillator waveform.
   */
  noteOn(frequency, type = 'sawtooth') {
    if (this.oscillators.has(frequency)) return;
    const ctx = this.audioContext;
    const osc = ctx.createOscillator();
    const gain = ctx.createGain();
    osc.type = type;
    osc.frequency.value = frequency;
    // Short linear attack to avoid a click at note start.
    gain.gain.setValueAtTime(0, ctx.currentTime);
    gain.gain.linearRampToValueAtTime(0.5, ctx.currentTime + 0.01);
    osc.connect(gain);
    gain.connect(this.masterGain);
    osc.start();
    this.oscillators.set(frequency, { osc, gain });
  }

  /**
   * Release a note with a short exponential decay; no-op if not sounding.
   * @param {number} frequency - Pitch in Hz.
   */
  noteOff(frequency) {
    const voice = this.oscillators.get(frequency);
    if (!voice) return;
    const ctx = this.audioContext;
    voice.gain.gain.setValueAtTime(voice.gain.gain.value, ctx.currentTime);
    voice.gain.gain.exponentialRampToValueAtTime(0.01, ctx.currentTime + 0.3);
    voice.osc.stop(ctx.currentTime + 0.3);
    this.oscillators.delete(frequency);
  }

  /**
   * @param {number} value - New master gain level.
   */
  setVolume(value) {
    this.masterGain.gain.value = value;
  }
}
// Shared synth instance driven by the keyboard handlers below.
const synth = new Synthesizer(audioContext);
// Equal-tempered frequencies (Hz) for one octave, C4 through C5.
const noteFrequencies = {
  'C4': 261.63,
  'D4': 293.66,
  'E4': 329.63,
  'F4': 349.23,
  'G4': 392.00,
  'A4': 440.00,
  'B4': 493.88,
  'C5': 523.25
};
// BUG FIX: the original looked up noteFrequencies[e.key.toUpperCase()], but
// e.key is a single character (e.g. 'c' -> 'C') while the table keys carry
// an octave ('C4'), so no key press could ever match. Map a piano-style
// keyboard row to note names explicitly instead.
const keyToNote = {
  A: 'C4', S: 'D4', D: 'E4', F: 'F4',
  G: 'G4', H: 'A4', J: 'B4', K: 'C5'
};
document.addEventListener('keydown', (e) => {
  if (e.repeat) return; // ignore OS auto-repeat while a key is held
  const note = keyToNote[e.key.toUpperCase()];
  if (note && noteFrequencies[note]) {
    synth.noteOn(noteFrequencies[note]);
  }
});
document.addEventListener('keyup', (e) => {
  const note = keyToNote[e.key.toUpperCase()];
  if (note && noteFrequencies[note]) {
    synth.noteOff(noteFrequencies[note]);
  }
});
/**
 * A simple step sequencer that drives a Synthesizer on a fixed grid.
 * Each of the `steps` slots holds an array of frequencies to trigger.
 * Timing uses setTimeout, which is adequate for a demo but drifts; a
 * production sequencer would schedule ahead on audioContext.currentTime.
 */
class Sequencer {
  /**
   * @param {AudioContext} audioContext - Context used by the internal synth.
   * @param {number} [bpm=120] - Tempo in beats per minute.
   * @param {number} [steps=16] - Number of 16th-note slots in the pattern.
   */
  constructor(audioContext, bpm = 120, steps = 16) {
    this.audioContext = audioContext;
    this.bpm = bpm;
    this.steps = steps;
    // One independent array per step (map avoids shared references).
    this.pattern = new Array(steps).fill(null).map(() => []);
    this.isPlaying = false;
    this.currentStep = 0;
    this.synth = new Synthesizer(audioContext);
  }

  /**
   * Add a note to a step; duplicates within a step are ignored.
   * @param {number} step - Step index (wrapped modulo `steps`).
   * @param {number} frequency - Pitch in Hz.
   */
  setNote(step, frequency) {
    const index = step % this.steps;
    if (!this.pattern[index].includes(frequency)) {
      this.pattern[index].push(frequency);
    }
  }

  /**
   * Remove a note from a step; no-op if the note is absent.
   * @param {number} step - Step index (wrapped modulo `steps`).
   * @param {number} frequency - Pitch in Hz.
   */
  removeNote(step, frequency) {
    const index = step % this.steps;
    const noteIndex = this.pattern[index].indexOf(frequency);
    if (noteIndex > -1) {
      this.pattern[index].splice(noteIndex, 1);
    }
  }

  /** Start playback from the current step; no-op if already playing. */
  start() {
    if (this.isPlaying) return;
    this.isPlaying = true;
    this.scheduleNext();
  }

  /** Stop playback and rewind to the first step. */
  stop() {
    this.isPlaying = false;
    this.currentStep = 0;
  }

  /**
   * Trigger the current step's notes and queue the next step.
   * Notes are released at 90% of the step so that consecutive identical
   * notes re-trigger cleanly.
   * (Removed an unused `currentTime` local present in the original.)
   */
  scheduleNext() {
    if (!this.isPlaying) return;
    // Duration of one 16th note at the current tempo, in seconds.
    const stepDuration = 60 / this.bpm / 4;
    const notes = this.pattern[this.currentStep];
    notes.forEach(freq => {
      this.synth.noteOn(freq);
      setTimeout(() => this.synth.noteOff(freq), stepDuration * 1000 * 0.9);
    });
    this.currentStep = (this.currentStep + 1) % this.steps;
    setTimeout(() => this.scheduleNext(), stepDuration * 1000);
  }

  /**
   * @param {number} bpm - New tempo; takes effect from the next step.
   */
  setBPM(bpm) {
    this.bpm = bpm;
  }
}
🎵 Web Audio API最佳实践
- 用户交互:AudioContext需要在用户交互后创建或resume
- 资源管理:及时断开节点连接,释放资源
- 性能优化:避免在动画循环中创建新对象
- 错误处理:处理音频解码失败等异常情况
🔊 音频处理链
典型的音频处理链:
音频源 -> 滤波器 -> 压缩器 -> 增益 -> 分析器 -> 输出
下一章将探讨 WebRTC,学习如何在浏览器中实现实时通信。