Jun711 opened this issue 3 years ago
I attempted to use the detune property to apply a manual pitch correction.

In this code, we define three melodies and transpose each one to every key to produce a long-playing sample. Each melody is written to a wavetable, which is applied to an AudioBufferSourceNode; the source is started and plays continuously until stopped.
These two lines within updatePlayback

    source.playbackRate.value = ps.playbackRate;
    source.detune.value = ps.detune;

work when used independently, but when used together, as shown, neither one appears to take effect. So they seem to be mutually exclusive.
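For reference, the Web Audio spec combines both parameters into a single computed rate, computedPlaybackRate = playbackRate * 2^(detune / 1200), so the two values compound rather than acting independently. A minimal sketch of that relationship (illustrative numbers, not taken from the code below):

    // 1.5x speed plus a further +100-cent detune:
    const computed = (rate, cents) => rate * Math.pow(2, cents / 1200);
    console.log(computed(1.5, 100)); // ≈ 1.589 (both settings contribute)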
/**
 * Writes a single wavetable for each melody per CPS
 * and has interactive playback rate control.
 */
const CPS_SAMPLES = new Map();

let context = new (window.AudioContext || window.webkitAudioContext)();
/**
 * Creates an array of samples for a given melody and CPS.
 *
 * @param {number} cps - Cycles per second.
 * @param {Array<Array<number>>} melody - Array of [midiNote, duration, velocity] triples.
 * @param {number} sampleRate - Audio sample rate.
 * @returns {Float32Array} - Array of audio samples.
 */
function createSamples(cps, melody, sampleRate = 48000) {
    // First pass: total length, so the Float32Array can be allocated once.
    let totalSamples = 0;
    melody.forEach(([note, dur, vel]) => {
        totalSamples += Math.floor((sampleRate * dur) / cps);
    });
    const samples = new Float32Array(totalSamples);
    let sampleIndex = 0;
    for (let i = 0; i < melody.length; i++) {
        const [note, dur, vel] = melody[i];
        const nSamples = Math.floor((sampleRate * dur) / cps);
        // MIDI note number to frequency (A4 = 69 = 440 Hz).
        const frequency = 440 * Math.pow(2, (note - 69) / 12);
        // MIDI velocity (0-127) to a rough logarithmic amplitude.
        const amp = Math.pow(10, vel / 127 - 1);
        for (let j = 0; j < nSamples; j++) {
            const time = j / sampleRate;
            samples[sampleIndex] = amp * Math.sin(time * 2 * Math.PI * frequency);
            sampleIndex++;
        }
    }
    return samples;
}
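// Quick sanity check of createSamples (illustrative values): one beat of
// A4 (MIDI 69) at cps = 1 and 48 kHz should yield exactly one second of audio:
//
//     const test = createSamples(1, [[69, 1, 100]], 48000);
//     console.log(test.length);                 // 48000
//     console.log(Math.pow(10, 100 / 127 - 1)); // peak amplitude ≈ 0.613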
/**
 * Creates an object containing an audio buffer and its duration.
 *
 * @param {AudioContext} context - Audio context.
 * @param {number} cps - Cycles per second.
 * @param {Array<Array<number>>} melody - Array of [midiNote, duration, velocity] triples.
 * @returns {Promise<{buffer: AudioBuffer, dur: number}>} - Resolves to an object with audio buffer and duration.
 */
async function createWavetableSynth(context, cps, melody) {
    const sampleRate = Math.max(44100, context.sampleRate);
    // Cache rendered sample arrays per (cps, melody) pair.
    const key = `${cps}_${JSON.stringify(melody)}`;
    let samples = CPS_SAMPLES.get(key);
    if (!samples) {
        samples = createSamples(cps, melody, sampleRate);
        CPS_SAMPLES.set(key, samples);
    }
    const buffer = context.createBuffer(1, samples.length, sampleRate);
    buffer.getChannelData(0).set(samples);
    return {
        buffer,
        dur: samples.length / sampleRate
    };
}
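// Standalone usage (inside an async function):
//
//     const { buffer, dur } = await createWavetableSynth(context, 1, [[69, 1, 100]]);
//     console.log(dur); // 1 second, assuming a 48 kHz context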
/**
 * Plays sequenced audio buffers.
 *
 * @param {AudioContext} context - Audio context.
 * @param {Array<{buffer: AudioBuffer, dur: number}>} melodies - Array of melody objects.
 */
function playSequencedBuffers(context, melodies) {
    melodies.forEach((obj) => {
        const { buffer, dur } = obj;
        const currentTime = context.currentTime;
        const source = context.createBufferSource();
        // Keep a reference so the tempo slider can retune this source later.
        obj.source = source;
        source.buffer = buffer;
        const gainNode = context.createGain();
        source.connect(gainNode);
        gainNode.connect(context.destination);
        gainNode.gain.setValueAtTime(1, currentTime);
        gainNode.gain.setValueAtTime(0, currentTime + dur);
        source.start(currentTime);
        source.stop(currentTime + dur);
    });
}
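// Note: jumping the gain straight from 1 to 0 with setValueAtTime can click
// at the buffer boundary; a short linear ramp is the usual fix, e.g.:
//
//     gainNode.gain.setValueAtTime(1, currentTime + dur - 0.02);
//     gainNode.gain.linearRampToValueAtTime(0, currentTime + dur);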
/**
 * Given a melody, returns a longer copy of it including all chromatic transpositions.
 * @param {Array<[number, number, number]>} melody - The base melody.
 * @returns {Array<[number, number, number]>} - A new array covering all 12 keys.
 */
function allKeys(melody) {
    // Start from the original key, then append copies transposed up 1..11 semitones.
    return [...new Array(11)].reduce(function go(acc, _, i) {
        return acc.concat(melody.map(n => {
            const m = [...n];
            m[0] += i + 1;
            return m;
        }));
    }, melody);
}
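// Example: a one-note melody expands to the original plus 11 transpositions,
// covering all 12 chromatic keys:
//
//     allKeys([[60, 1, 100]]);
//     // => [[60, 1, 100], [61, 1, 100], ..., [71, 1, 100]]  (12 entries)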
let midis = allKeys([
    [50, 2, 100],
    [52, 2, 55],
    [54, 2, 66],
    [55, 2, 77],
    [57, 2, 88],
    [59, 2, 99],
    [61, 2, 111],
    [62, 2, 122]
]);
const midis2 = allKeys([
    [62, 4, 80],
    [61, 1, 70],
    [59, 1, 70],
    [64, 2, 90],
    [74, 4, 80],
    [73, 1, 70],
    [71, 1, 70],
    [76, 2, 90]
]);
const midis3 = allKeys([
    [88, 8, 80],
    [90, 6, 70],
    [93, 2, 70]
]);
let buffers = [];
let cps = 1;

const button = document.createElement('button');
button.textContent = 'Play Trio';
document.body.appendChild(button);

const tempoSlider = document.createElement('input');
tempoSlider.type = 'range';
tempoSlider.min = '60';
tempoSlider.max = '200';
tempoSlider.value = '60';
tempoSlider.step = '1';
document.body.appendChild(tempoSlider);

const tempoLabel = document.createElement('label');
tempoLabel.textContent = `Tempo: ${tempoSlider.value} BPM`;
document.body.appendChild(tempoLabel);

tempoSlider.addEventListener('input', () => {
    tempoLabel.textContent = `Tempo: ${tempoSlider.value} BPM`;
});
button.addEventListener('click', async () => {
    // Restart from a fresh context so repeated clicks do not stack playback.
    if (context.state === 'running') {
        context.close();
        context = new (window.AudioContext || window.webkitAudioContext)();
    }
    const tempo = parseInt(tempoSlider.value, 10);
    // Note: toPrecision(1) rounded the tempo to one significant figure
    // (e.g. 120 -> 100), so derive cps from the tempo directly.
    cps = tempo / 60;
    buffers = [
        await createWavetableSynth(context, cps, midis),
        await createWavetableSynth(context, cps, midis2),
        await createWavetableSynth(context, cps, midis3)
    ];
    playSequencedBuffers(context, buffers);
});
/**
 * Calculates playback parameters for a change in CPS.
 *
 * @param {number} oldCps - Previous cycles per second.
 * @param {number} newCps - New cycles per second.
 * @returns {{playbackRate: number, detune: number}}
 */
function calculatePlaybackParams(oldCps, newCps) {
    const playbackRate = newCps / oldCps;
    // Detune (in cents) intended to cancel the pitch shift caused by playbackRate.
    let detune = -1200 * Math.log2(playbackRate);
    if (playbackRate < 1) {
        detune *= -1;
    }
    return {
        playbackRate,
        detune
    };
}
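// Plugging these values back into the spec's formula shows why the two
// settings appear to cancel each other: for any newCps > oldCps the
// computed rate comes out to exactly 1, so nothing audibly changes.
//
//     const ps = calculatePlaybackParams(1, 2); // playbackRate 2, detune -1200
//     console.log(ps.playbackRate * Math.pow(2, ps.detune / 1200)); // 1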
tempoSlider.addEventListener('input', () => {
    if (context.state !== 'running') {
        return;
    }
    tempoLabel.textContent = `Tempo: ${tempoSlider.value} BPM`;
    const newCps = parseInt(tempoSlider.value, 10) / 60;
    const ps = calculatePlaybackParams(cps, newCps);
    buffers.forEach(function updatePlayback(player) {
        const { source } = player;
        source.playbackRate.value = ps.playbackRate;
        source.detune.value = ps.detune;
    });
});
Thanks for reporting! There seem to be some alternative approaches here that look like they cover this use case:
I achieved the playbackRate effect using this library (SoundTouchJS), but personally, I feel that this API should be implemented officially!
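In case it helps, the gist of what I did with soundtouchjs was roughly the following (written from memory of its README, so treat it as a sketch rather than exact code):

    import { PitchShifter } from 'soundtouchjs';

    // Time-stretch without a pitch change: raise tempo, leave pitch at 1.
    const shifter = new PitchShifter(context, audioBuffer, 1024);
    shifter.tempo = 1.2; // 20% faster
    shifter.pitch = 1;   // unchanged
    shifter.connect(gainNode);
    gainNode.connect(context.destination);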
I changed AudioBufferSourceNode.playbackRate using the code at https://github.com/mdn/webaudio-examples/blob/master/offline-audio-context-promise/index.html but found that the pitch was not preserved. The documentation confirms that there is no pitch correction when changing AudioBufferSourceNode.playbackRate.
If we change playbackRate through an HTML audio element, the pitch is preserved. Is there a way to obtain the same effect here?
https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode
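For what it's worth, the media-element behavior comes from HTMLMediaElement.preservesPitch (historically prefixed as mozPreservesPitch / webkitPreservesPitch), which defaults to true; AudioBufferSourceNode has no equivalent switch, which is why a time-stretching library is needed there. A minimal media-element sketch (the file name is hypothetical):

    const audio = new Audio('example.mp3'); // hypothetical source file
    audio.preservesPitch = true;  // the default in current browsers
    audio.playbackRate = 1.25;    // plays faster, pitch preserved
    audio.play();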