Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 16 additions & 13 deletions packages/media/src/audio-iterator-manager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import type {DelayPlaybackIfNotPremounting} from './delay-playback-if-not-premou
import type {Nonce} from './nonce-manager';
import {makePrewarmedAudioIteratorCache} from './prewarm-iterator-for-looping';
import {ALLOWED_GLOBAL_TIME_ANCHOR_SHIFT} from './set-global-time-anchor';
import type {SharedAudioContextForMediaPlayer} from './shared-audio-context-for-media-player';

const MAX_BUFFER_AHEAD_SECONDS = 8;

Expand All @@ -30,7 +31,7 @@ export const audioIteratorManager = ({
}: {
audioTrack: InputAudioTrack;
delayPlaybackHandleIfNotPremounting: () => DelayPlaybackIfNotPremounting;
sharedAudioContext: AudioContext;
sharedAudioContext: SharedAudioContextForMediaPlayer;
getIsLooping: () => boolean;
getEndTime: () => number;
getStartTime: () => number;
Expand All @@ -40,8 +41,8 @@ export const audioIteratorManager = ({
let muted = initialMuted;
let currentVolume = 1;

const gainNode = sharedAudioContext.createGain();
gainNode.connect(sharedAudioContext.destination);
const gainNode = sharedAudioContext.audioContext.createGain();
gainNode.connect(sharedAudioContext.audioContext.destination);

const audioSink = new AudioBufferSink(audioTrack);
const prewarmedAudioIteratorCache =
Expand All @@ -67,7 +68,7 @@ export const audioIteratorManager = ({
throw new Error('Audio buffer iterator not found');
}

if (sharedAudioContext.state !== 'running') {
if (sharedAudioContext.audioContext.state !== 'running') {
throw new Error(
'Tried to schedule node while audio context is not running',
);
Expand All @@ -77,7 +78,7 @@ export const audioIteratorManager = ({
return;
}

const node = sharedAudioContext.createBufferSource();
const node = sharedAudioContext.audioContext.createBufferSource();
node.buffer = buffer;
node.playbackRate.value = playbackRate;
node.connect(gainNode);
Expand Down Expand Up @@ -106,6 +107,7 @@ export const audioIteratorManager = ({
buffer,
scheduledTime: started.scheduledTime,
playbackRate,
scheduledAtAnchor: sharedAudioContext.audioSyncAnchor.value,
});
node.onended = () => {
// Some leniency is needed as we find that sometimes onended is fired a bit too early
Expand Down Expand Up @@ -174,8 +176,9 @@ export const audioIteratorManager = ({

if (
getIsPlaying() &&
sharedAudioContext.state === 'running' &&
(sharedAudioContext.getOutputTimestamp().contextTime ?? 0) > 0
sharedAudioContext.audioContext.state === 'running' &&
(sharedAudioContext.audioContext.getOutputTimestamp().contextTime ?? 0) >
0
) {
resumeScheduledAudioChunks({
playbackRate,
Expand Down Expand Up @@ -236,12 +239,12 @@ export const audioIteratorManager = ({
using delayHandle = delayPlaybackHandleIfNotPremounting();
currentDelayHandle = delayHandle;

const iterator = makeAudioIterator(
const iterator = makeAudioIterator({
startFromSecond,
getEndTime(),
prewarmedAudioIteratorCache,
maximumTimestamp: getEndTime(),
cache: prewarmedAudioIteratorCache,
debugAudioScheduling,
);
});
audioIteratorsCreated++;
audioBufferIterator = iterator;

Expand Down Expand Up @@ -354,8 +357,8 @@ export const audioIteratorManager = ({
from:
queuedPeriod.from -
ALLOWED_GLOBAL_TIME_ANCHOR_SHIFT -
sharedAudioContext.baseLatency -
sharedAudioContext.outputLatency,
sharedAudioContext.audioContext.baseLatency -
sharedAudioContext.audioContext.outputLatency,
until: queuedPeriod.until,
}
: null;
Expand Down
49 changes: 33 additions & 16 deletions packages/media/src/audio/audio-preview-iterator.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import {Internals} from 'remotion';
import {roundTo4Digits} from '../helpers/round-to-4-digits';
import type {PrewarmedAudioIteratorCache} from '../prewarm-iterator-for-looping';
import {ALLOWED_GLOBAL_TIME_ANCHOR_SHIFT} from '../set-global-time-anchor';
import type {SharedAudioContextForMediaPlayer} from '../shared-audio-context-for-media-player';

export const HEALTHY_BUFFER_THRESHOLD_SECONDS = 1;

Expand All @@ -12,19 +13,25 @@ export type QueuedNode = {
buffer: AudioBuffer;
scheduledTime: number;
playbackRate: number;
scheduledAtAnchor: number;
};

export type QueuedPeriod = {
from: number;
until: number;
};

export const makeAudioIterator = (
startFromSecond: number,
maximumTimestamp: number,
cache: PrewarmedAudioIteratorCache,
debugAudioScheduling: boolean,
) => {
export const makeAudioIterator = ({
startFromSecond,
maximumTimestamp,
cache,
debugAudioScheduling,
}: {
startFromSecond: number;
maximumTimestamp: number;
cache: PrewarmedAudioIteratorCache;
debugAudioScheduling: boolean;
}) => {
let destroyed = false;
const iterator = cache.makeIteratorOrUsePrewarmed(
startFromSecond,
Expand All @@ -39,27 +46,34 @@ export const makeAudioIterator = (
let pendingNext: Promise<IteratorResult<WrappedAudioBuffer, void>> | null =
null;

const cleanupAudioQueue = (audioContext: AudioContext) => {
const cleanupAudioQueue = (
audioContext: SharedAudioContextForMediaPlayer,
) => {
for (const node of queuedAudioNodes) {
try {
const currentlyHearing = audioContext.getOutputTimestamp().contextTime!;
const nodeEndTime =
node.scheduledTime + node.buffer.duration / node.playbackRate;

// When we unmount at the end of playback, we might not yet be done with audio anchors
// we should not stop the nodes
const isAlreadyPlaying =
node.scheduledTime - ALLOWED_GLOBAL_TIME_ANCHOR_SHIFT <
audioContext.currentTime;
audioContext.audioContext.currentTime;

const shouldKeep = isAlreadyPlaying;
// except for when the audio anchor changed (e.g. through a seek)
const wasScheduledForThisAnchor =
node.scheduledAtAnchor === audioContext.audioSyncAnchor.value;

if (shouldKeep) {
if (isAlreadyPlaying && wasScheduledForThisAnchor) {
continue;
}

if (debugAudioScheduling) {
const currentlyHearing =
audioContext.audioContext.getOutputTimestamp().contextTime!;
const nodeEndTime =
node.scheduledTime + node.buffer.duration / node.playbackRate;

Internals.Log.info(
{logLevel: 'trace', tag: 'audio-scheduling'},
`Stopping node ${node.timestamp.toFixed(3)}, currently hearing = ${currentlyHearing.toFixed(3)} currentTime = ${audioContext.currentTime.toFixed(3)} nodeEndTime = ${nodeEndTime.toFixed(3)} scheduledTime = ${node.scheduledTime.toFixed(3)}`,
`Stopping node ${node.timestamp.toFixed(3)}, currently hearing = ${currentlyHearing.toFixed(3)} currentTime = ${audioContext.audioContext.currentTime.toFixed(3)} nodeEndTime = ${nodeEndTime.toFixed(3)} scheduledTime = ${node.scheduledTime.toFixed(3)}`,
);
}

Expand Down Expand Up @@ -259,7 +273,7 @@ export const makeAudioIterator = (
};

return {
destroy: (audioContext: AudioContext) => {
destroy: (audioContext: SharedAudioContextForMediaPlayer) => {
cleanupAudioQueue(audioContext);
destroyed = true;
iterator.return().catch(() => undefined);
Expand All @@ -286,19 +300,22 @@ export const makeAudioIterator = (
buffer,
scheduledTime,
playbackRate,
scheduledAtAnchor,
}: {
node: AudioBufferSourceNode;
timestamp: number;
buffer: AudioBuffer;
scheduledTime: number;
playbackRate: number;
scheduledAtAnchor: number;
}) => {
queuedAudioNodes.push({
node,
timestamp,
buffer,
scheduledTime,
playbackRate,
scheduledAtAnchor,
});
},
removeQueuedAudioNode: (node: AudioBufferSourceNode) => {
Expand Down
2 changes: 1 addition & 1 deletion packages/media/src/media-player.ts
Original file line number Diff line number Diff line change
Expand Up @@ -292,7 +292,7 @@ export class MediaPlayer {
audioTrack,
delayPlaybackHandleIfNotPremounting:
this.delayPlaybackHandleIfNotPremounting,
sharedAudioContext: this.sharedAudioContext.audioContext,
sharedAudioContext: this.sharedAudioContext,
getIsLooping: () => this.loop,
getEndTime: () => this.getEndTime(),
getStartTime: () => this.getStartTime(),
Expand Down
140 changes: 140 additions & 0 deletions packages/media/src/test/audio-cleanup-on-seek.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
import {ALL_FORMATS, AudioBufferSink, Input, UrlSource} from 'mediabunny';
import {expect, test} from 'vitest';
import {makeAudioIterator} from '../audio/audio-preview-iterator';
import {makePrewarmedAudioIteratorCache} from '../prewarm-iterator-for-looping';
import type {SharedAudioContextForMediaPlayer} from '../shared-audio-context-for-media-player';

const makeCache = async () => {
const input = new Input({
source: new UrlSource('https://remotion.media/video.mp4'),
formats: ALL_FORMATS,
});
const audioTrack = await input.getPrimaryAudioTrack();
if (!audioTrack) {
throw new Error('No audio track found');
}

const audioBufferSink = new AudioBufferSink(audioTrack);

return makePrewarmedAudioIteratorCache(audioBufferSink);
};

/**
 * Creates a fake AudioBufferSourceNode that records whether stop() was
 * invoked, so the tests can observe cleanup decisions without a real
 * audio graph.
 */
const makeMockNode = () => {
	let stopCalled = false;

	return {
		node: {
			stop() {
				stopCalled = true;
			},
		} as unknown as AudioBufferSourceNode,
		// Reports whether stop() has been called on the mock node.
		wasStopped: () => stopCalled,
	};
};

/**
 * Creates a fake AudioBuffer exposing only the `duration` field, which is
 * the single property the cleanup logic reads from a buffer.
 */
const makeMockBuffer = (duration: number) =>
	({duration}) as unknown as AudioBuffer;

/**
 * Builds a stub SharedAudioContextForMediaPlayer whose clock is frozen at
 * `currentTime` and whose sync anchor holds `anchorValue`, so tests can
 * simulate "time has passed" and "a seek changed the anchor" scenarios.
 */
const makeMockSharedAudioContext = ({
	currentTime,
	anchorValue,
}: {
	currentTime: number;
	anchorValue: number;
}): SharedAudioContextForMediaPlayer => {
	const frozenClockContext = {
		currentTime,
		getOutputTimestamp: () => ({contextTime: currentTime}),
	} as unknown as AudioContext;

	return {
		audioContext: frozenClockContext,
		audioSyncAnchor: {value: anchorValue},
		scheduleAudioNode: () => ({type: 'started', scheduledTime: 0}),
	};
};

test('destroy should NOT stop nodes that are already playing with the same anchor', async () => {
	const cache = await makeCache();
	const iterator = makeAudioIterator({
		startFromSecond: 0,
		maximumTimestamp: Infinity,
		cache,
		debugAudioScheduling: false,
	});

	const first = makeMockNode();
	const second = makeMockNode();

	// Queue two nodes scheduled for anchor 0, both with scheduledTime
	// already in the past relative to the mocked currentTime of 1.0.
	const scheduled = [
		{mock: first, timestamp: 0, scheduledTime: 0.1},
		{mock: second, timestamp: 0.021, scheduledTime: 0.121},
	];
	for (const {mock, timestamp, scheduledTime} of scheduled) {
		iterator.addQueuedAudioNode({
			node: mock.node,
			timestamp,
			buffer: makeMockBuffer(0.021),
			scheduledTime,
			playbackRate: 1,
			scheduledAtAnchor: 0,
		});
	}

	// Destroy with the SAME anchor (0) while currentTime is well past
	// both scheduled times.
	iterator.destroy(
		makeMockSharedAudioContext({currentTime: 1.0, anchorValue: 0}),
	);

	// Already-playing nodes that were scheduled for the current anchor
	// must be left playing.
	expect(first.wasStopped()).toBe(false);
	expect(second.wasStopped()).toBe(false);
});

test('destroy should stop nodes when the audio anchor changed (seek to different position)', async () => {
	const cache = await makeCache();
	const iterator = makeAudioIterator({
		startFromSecond: 0,
		maximumTimestamp: Infinity,
		cache,
		debugAudioScheduling: false,
	});

	const first = makeMockNode();
	const second = makeMockNode();

	// Queue two nodes scheduled for anchor 0.
	const scheduled = [
		{mock: first, timestamp: 0, scheduledTime: 0.1},
		{mock: second, timestamp: 0.021, scheduledTime: 0.121},
	];
	for (const {mock, timestamp, scheduledTime} of scheduled) {
		iterator.addQueuedAudioNode({
			node: mock.node,
			timestamp,
			buffer: makeMockBuffer(0.021),
			scheduledTime,
			playbackRate: 1,
			scheduledAtAnchor: 0,
		});
	}

	// Destroy with a DIFFERENT anchor (simulating that a seek happened).
	// Even though the nodes look "already playing" (currentTime is past
	// their scheduledTime), the anchor mismatch must force a stop.
	iterator.destroy(
		makeMockSharedAudioContext({currentTime: 1.0, anchorValue: 1}),
	);

	// Both nodes belong to the stale anchor and must have been stopped.
	expect(first.wasStopped()).toBe(true);
	expect(second.wasStopped()).toBe(true);
});
Loading
Loading