chore: monorepo setup (#7175)

This commit is contained in:
Noel
2022-01-07 17:18:25 +01:00
committed by GitHub
parent 780b7ed39f
commit 16390efe6e
504 changed files with 25459 additions and 22830 deletions

View File

@@ -0,0 +1,627 @@
import { addAudioPlayer, deleteAudioPlayer } from '../DataStore';
import { Awaited, noop } from '../util/util';
import { VoiceConnection, VoiceConnectionStatus } from '../VoiceConnection';
import { AudioPlayerError } from './AudioPlayerError';
import type { AudioResource } from './AudioResource';
import { PlayerSubscription } from './PlayerSubscription';
import { TypedEmitter } from 'tiny-typed-emitter';
// A single Opus "silence" frame (3 bytes). Dispatched in place of real audio when no
// packet is available (buffering, pausing, padding) to avoid abrupt audio cut-offs.
export const SILENCE_FRAME = Buffer.from([0xf8, 0xff, 0xfe]);
/**
* Describes the behavior of the player when an audio packet is played but there are no available
* voice connections to play to.
*/
export enum NoSubscriberBehavior {
	/**
	 * Pauses playing the stream until a voice connection becomes available.
	 */
	Pause = 'pause',

	/**
	 * Continues to play through the resource regardless.
	 */
	Play = 'play',

	/**
	 * The player stops and enters the Idle state.
	 */
	Stop = 'stop',
}
/**
 * The various statuses that an AudioPlayer can hold. Each status value is also
 * emitted as an event name when the player transitions into that status.
 */
export enum AudioPlayerStatus {
	/**
	 * When there is currently no resource for the player to be playing.
	 */
	Idle = 'idle',

	/**
	 * When the player is waiting for an audio resource to become readable before transitioning to Playing.
	 */
	Buffering = 'buffering',

	/**
	 * When the player has been manually paused.
	 */
	Paused = 'paused',

	/**
	 * When the player is actively playing an audio resource.
	 */
	Playing = 'playing',

	/**
	 * When the player has paused itself. Only possible with the "pause" no subscriber behavior.
	 */
	AutoPaused = 'autopaused',
}
/**
* Options that can be passed when creating an audio player, used to specify its behavior.
*/
export interface CreateAudioPlayerOptions {
	/**
	 * Whether the player should emit 'debug' events. Debug logging is enabled unless
	 * this is explicitly set to false (see the AudioPlayer constructor).
	 */
	debug?: boolean;

	/**
	 * Configurable behaviors of the player.
	 */
	behaviors?: {
		// What to do when there are no playable subscribed connections. Default: NoSubscriberBehavior.Pause.
		noSubscriber?: NoSubscriberBehavior;
		// Consecutive failed frame reads tolerated before playback stops. Default: 5.
		maxMissedFrames?: number;
	};
}
/**
* The state that an AudioPlayer is in when it has no resource to play. This is the starting state.
*/
export interface AudioPlayerIdleState {
	// No resource or stream callbacks are held while idle.
	status: AudioPlayerStatus.Idle;
}
/**
* The state that an AudioPlayer is in when it is waiting for a resource to become readable. Once this
* happens, the AudioPlayer will enter the Playing state. If the resource ends/errors before this, then
* it will re-enter the Idle state.
*/
export interface AudioPlayerBufferingState {
	status: AudioPlayerStatus.Buffering;

	/**
	 * The resource that the AudioPlayer is waiting for
	 */
	resource: AudioResource;

	// Fired once the stream becomes readable; transitions the player to Playing (see AudioPlayer#play).
	onReadableCallback: () => void;
	// Fired if the stream ends/closes/finishes before becoming readable; returns the player to Idle.
	onFailureCallback: () => void;
	// 'error' listener attached to the resource's playStream while it is in use.
	onStreamError: (error: Error) => void;
}
/**
* The state that an AudioPlayer is in when it is actively playing an AudioResource. When playback ends,
* it will enter the Idle state.
*/
export interface AudioPlayerPlayingState {
	status: AudioPlayerStatus.Playing;

	/**
	 * The number of consecutive times that the audio resource has been unable to provide an Opus frame.
	 */
	missedFrames: number;

	/**
	 * The playback duration in milliseconds of the current audio resource. This includes filler silence packets
	 * that have been played when the resource was buffering.
	 */
	playbackDuration: number;

	/**
	 * The resource that is being played.
	 */
	resource: AudioResource;

	// 'error' listener attached to the resource's playStream; removed when the resource is swapped out.
	onStreamError: (error: Error) => void;
}
/**
* The state that an AudioPlayer is in when it has either been explicitly paused by the user, or done
* automatically by the AudioPlayer itself if there are no available subscribers.
*/
export interface AudioPlayerPausedState {
	status: AudioPlayerStatus.Paused | AudioPlayerStatus.AutoPaused;

	/**
	 * How many silence packets still need to be played to avoid audio interpolation due to the stream suddenly pausing.
	 */
	silencePacketsRemaining: number;

	/**
	 * The playback duration in milliseconds of the current audio resource. This includes filler silence packets
	 * that have been played when the resource was buffering.
	 */
	playbackDuration: number;

	/**
	 * The current resource of the audio player.
	 */
	resource: AudioResource;

	// 'error' listener attached to the resource's playStream; removed when the resource is swapped out.
	onStreamError: (error: Error) => void;
}
/**
* The various states that the player can be in.
*/
export type AudioPlayerState =
	| AudioPlayerIdleState
	| AudioPlayerBufferingState
	| AudioPlayerPlayingState
	| AudioPlayerPausedState;

/**
 * The events that an AudioPlayer may emit. In addition to the named events, the mapped
 * type below adds one event per AudioPlayerStatus value, emitted when the player enters
 * that status (see the state setter in AudioPlayer) with the old and the new state.
 */
export type AudioPlayerEvents = {
	error: (error: AudioPlayerError) => Awaited<void>;
	debug: (message: string) => Awaited<void>;
	stateChange: (oldState: AudioPlayerState, newState: AudioPlayerState) => Awaited<void>;
	subscribe: (subscription: PlayerSubscription) => Awaited<void>;
	unsubscribe: (subscription: PlayerSubscription) => Awaited<void>;
} & {
	[status in AudioPlayerStatus]: (
		oldState: AudioPlayerState,
		newState: AudioPlayerState & { status: status },
	) => Awaited<void>;
};
/**
* Used to play audio resources (i.e. tracks, streams) to voice connections.
*
* @remarks
* Audio players are designed to be re-used - even if a resource has finished playing, the player itself
* can still be used.
*
* The AudioPlayer drives the timing of playback, and therefore is unaffected by voice connections
* becoming unavailable. Its behavior in these scenarios can be configured.
*/
/**
 * Used to play audio resources (i.e. tracks, streams) to voice connections.
 *
 * @remarks
 * Audio players are designed to be re-used - even if a resource has finished playing, the player itself
 * can still be used.
 *
 * The AudioPlayer drives the timing of playback, and therefore is unaffected by voice connections
 * becoming unavailable. Its behavior in these scenarios can be configured.
 */
export class AudioPlayer extends TypedEmitter<AudioPlayerEvents> {
	/**
	 * The state that the AudioPlayer is in.
	 */
	private _state: AudioPlayerState;

	/**
	 * A list of VoiceConnections that are registered to this AudioPlayer. The player will attempt to play audio
	 * to the streams in this list.
	 */
	private readonly subscribers: PlayerSubscription[] = [];

	/**
	 * The behavior that the player should follow when it enters certain situations.
	 */
	private readonly behaviors: {
		noSubscriber: NoSubscriberBehavior;
		maxMissedFrames: number;
	};

	/**
	 * The debug logger function, if debugging is enabled.
	 */
	private readonly debug: null | ((message: string) => void);

	/**
	 * Creates a new AudioPlayer.
	 *
	 * @param options - Desired behaviors; omitted fields fall back to the defaults below
	 */
	public constructor(options: CreateAudioPlayerOptions = {}) {
		super();
		this._state = { status: AudioPlayerStatus.Idle };
		this.behaviors = {
			noSubscriber: NoSubscriberBehavior.Pause,
			maxMissedFrames: 5,
			...options.behaviors,
		};
		// Debug logging is enabled unless options.debug is explicitly false.
		this.debug = options.debug === false ? null : (message: string) => this.emit('debug', message);
	}

	/**
	 * A list of subscribed voice connections that can currently receive audio to play.
	 */
	public get playable() {
		return this.subscribers
			.filter(({ connection }) => connection.state.status === VoiceConnectionStatus.Ready)
			.map(({ connection }) => connection);
	}

	/**
	 * Subscribes a VoiceConnection to the audio player's play list. If the VoiceConnection is already subscribed,
	 * then the existing subscription is used.
	 *
	 * @remarks
	 * This method should not be directly called. Instead, use VoiceConnection#subscribe.
	 *
	 * @param connection - The connection to subscribe
	 *
	 * @returns The new subscription if the voice connection is not yet subscribed, otherwise the existing subscription
	 */
	// NOTE(review): presumably silences an unused-private diagnostic, since this is only
	// reached through bracket access from outside the class — confirm.
	// @ts-ignore
	private subscribe(connection: VoiceConnection) {
		const existingSubscription = this.subscribers.find((subscription) => subscription.connection === connection);
		if (!existingSubscription) {
			const subscription = new PlayerSubscription(connection, this);
			this.subscribers.push(subscription);
			// Deferred so the caller holds the subscription before listeners observe it.
			setImmediate(() => this.emit('subscribe', subscription));
			return subscription;
		}
		return existingSubscription;
	}

	/**
	 * Unsubscribes a subscription - i.e. removes a voice connection from the play list of the audio player.
	 *
	 * @remarks
	 * This method should not be directly called. Instead, use PlayerSubscription#unsubscribe.
	 *
	 * @param subscription - The subscription to remove
	 *
	 * @returns Whether or not the subscription existed on the player and was removed
	 */
	// NOTE(review): called via bracket access from PlayerSubscription#unsubscribe — confirm
	// the @ts-ignore only covers the unused-private diagnostic.
	// @ts-ignore
	private unsubscribe(subscription: PlayerSubscription) {
		const index = this.subscribers.indexOf(subscription);
		const exists = index !== -1;
		if (exists) {
			this.subscribers.splice(index, 1);
			subscription.connection.setSpeaking(false);
			this.emit('unsubscribe', subscription);
		}
		return exists;
	}

	/**
	 * The state that the player is in.
	 */
	public get state() {
		return this._state;
	}

	/**
	 * Sets a new state for the player, performing clean-up operations where necessary.
	 */
	public set state(newState: AudioPlayerState) {
		const oldState = this._state;
		const newResource = Reflect.get(newState, 'resource') as AudioResource | undefined;
		// If the old resource is being replaced or dropped, fully detach and destroy it.
		if (oldState.status !== AudioPlayerStatus.Idle && oldState.resource !== newResource) {
			// Swallow any late 'error' events once the real handler below is removed.
			oldState.resource.playStream.on('error', noop);
			oldState.resource.playStream.off('error', oldState.onStreamError);
			oldState.resource.audioPlayer = undefined;
			oldState.resource.playStream.destroy();
			oldState.resource.playStream.read(); // required to ensure buffered data is drained, prevents memory leak
		}
		// When leaving the Buffering state (or buffering a new resource), then remove the event listeners from it
		if (
			oldState.status === AudioPlayerStatus.Buffering &&
			(newState.status !== AudioPlayerStatus.Buffering || newState.resource !== oldState.resource)
		) {
			oldState.resource.playStream.off('end', oldState.onFailureCallback);
			oldState.resource.playStream.off('close', oldState.onFailureCallback);
			oldState.resource.playStream.off('finish', oldState.onFailureCallback);
			oldState.resource.playStream.off('readable', oldState.onReadableCallback);
		}
		// transitioning into an idle should ensure that connections stop speaking
		if (newState.status === AudioPlayerStatus.Idle) {
			this._signalStopSpeaking();
			deleteAudioPlayer(this);
		}
		// attach to the global audio player timer
		if (newResource) {
			addAudioPlayer(this);
		}
		// playing -> playing state changes should still transition if a resource changed (seems like it would be useful!)
		const didChangeResources =
			oldState.status !== AudioPlayerStatus.Idle &&
			newState.status === AudioPlayerStatus.Playing &&
			oldState.resource !== newState.resource;
		this._state = newState;
		this.emit('stateChange', oldState, this._state);
		// Also emit the status-named event (e.g. 'playing', 'idle') on actual transitions.
		if (oldState.status !== newState.status || didChangeResources) {
			// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
			this.emit(newState.status, oldState, this._state as any);
		}
		this.debug?.(`state change:\nfrom ${stringifyState(oldState)}\nto ${stringifyState(newState)}`);
	}

	/**
	 * Plays a new resource on the player. If the player is already playing a resource, the existing resource is destroyed
	 * (it cannot be reused, even in another player) and is replaced with the new resource.
	 *
	 * @remarks
	 * The player will transition to the Playing state once playback begins, and will return to the Idle state once
	 * playback is ended.
	 *
	 * If the player was previously playing a resource and this method is called, the player will not transition to the
	 * Idle state during the swap over.
	 *
	 * @param resource - The resource to play
	 *
	 * @throws Will throw if attempting to play an audio resource that has already ended, or is being played by another player
	 */
	public play<T>(resource: AudioResource<T>) {
		if (resource.ended) {
			throw new Error('Cannot play a resource that has already ended.');
		}
		if (resource.audioPlayer) {
			// Re-playing the same resource on the same player is a no-op.
			if (resource.audioPlayer === this) {
				return;
			}
			throw new Error('Resource is already being played by another audio player.');
		}
		// Claim the resource for this player.
		resource.audioPlayer = this;
		// Attach error listeners to the stream that will propagate the error and then return to the Idle
		// state if the resource is still being used.
		const onStreamError = (error: Error) => {
			if (this.state.status !== AudioPlayerStatus.Idle) {
				/**
				 * Emitted when there is an error emitted from the audio resource played by the audio player
				 *
				 * @event AudioPlayer#error
				 * @type {AudioPlayerError}
				 */
				this.emit('error', new AudioPlayerError(error, this.state.resource));
			}
			// Only reset to Idle if the erroring resource is still the active one (an
			// 'error' handler above may have already swapped in a new resource).
			if (this.state.status !== AudioPlayerStatus.Idle && this.state.resource === resource) {
				this.state = {
					status: AudioPlayerStatus.Idle,
				};
			}
		};
		resource.playStream.once('error', onStreamError);
		if (resource.started) {
			// Stream is already readable — play immediately.
			this.state = {
				status: AudioPlayerStatus.Playing,
				missedFrames: 0,
				playbackDuration: 0,
				resource,
				onStreamError,
			};
		} else {
			// Stream is not yet readable — buffer until it is (or until it fails).
			const onReadableCallback = () => {
				if (this.state.status === AudioPlayerStatus.Buffering && this.state.resource === resource) {
					this.state = {
						status: AudioPlayerStatus.Playing,
						missedFrames: 0,
						playbackDuration: 0,
						resource,
						onStreamError,
					};
				}
			};
			const onFailureCallback = () => {
				if (this.state.status === AudioPlayerStatus.Buffering && this.state.resource === resource) {
					this.state = {
						status: AudioPlayerStatus.Idle,
					};
				}
			};
			resource.playStream.once('readable', onReadableCallback);
			resource.playStream.once('end', onFailureCallback);
			resource.playStream.once('close', onFailureCallback);
			resource.playStream.once('finish', onFailureCallback);
			this.state = {
				status: AudioPlayerStatus.Buffering,
				resource,
				onReadableCallback,
				onFailureCallback,
				onStreamError,
			};
		}
	}

	/**
	 * Pauses playback of the current resource, if any.
	 *
	 * @param interpolateSilence - If true, the player will play 5 packets of silence after pausing to prevent audio glitches
	 *
	 * @returns `true` if the player was successfully paused, otherwise `false`
	 */
	public pause(interpolateSilence = true) {
		if (this.state.status !== AudioPlayerStatus.Playing) return false;
		this.state = {
			...this.state,
			status: AudioPlayerStatus.Paused,
			silencePacketsRemaining: interpolateSilence ? 5 : 0,
		};
		return true;
	}

	/**
	 * Unpauses playback of the current resource, if any.
	 *
	 * @returns `true` if the player was successfully unpaused, otherwise `false`
	 */
	public unpause() {
		// Only a manual pause can be undone here; AutoPaused resolves itself in _stepPrepare.
		if (this.state.status !== AudioPlayerStatus.Paused) return false;
		this.state = {
			...this.state,
			status: AudioPlayerStatus.Playing,
			missedFrames: 0,
		};
		return true;
	}

	/**
	 * Stops playback of the current resource and destroys the resource. The player will either transition to the Idle state,
	 * or remain in its current state until the silence padding frames of the resource have been played.
	 *
	 * @param force - If true, will force the player to enter the Idle state even if the resource has silence padding frames
	 *
	 * @returns `true` if the player will come to a stop, otherwise `false`
	 */
	public stop(force = false) {
		if (this.state.status === AudioPlayerStatus.Idle) return false;
		if (force || this.state.resource.silencePaddingFrames === 0) {
			this.state = {
				status: AudioPlayerStatus.Idle,
			};
		} else if (this.state.resource.silenceRemaining === -1) {
			// Start the silence padding countdown; the resource will report ended once it hits 0.
			this.state.resource.silenceRemaining = this.state.resource.silencePaddingFrames;
		}
		return true;
	}

	/**
	 * Checks whether the underlying resource (if any) is playable (readable)
	 *
	 * @returns `true` if the resource is playable, otherwise `false`
	 */
	public checkPlayable() {
		const state = this._state;
		if (state.status === AudioPlayerStatus.Idle || state.status === AudioPlayerStatus.Buffering) return false;
		// If the stream has been destroyed or is no longer readable, then transition to the Idle state.
		if (!state.resource.readable) {
			this.state = {
				status: AudioPlayerStatus.Idle,
			};
			return false;
		}
		return true;
	}

	/**
	 * Called roughly every 20ms by the global audio player timer. Dispatches any audio packets that are buffered
	 * by the active connections of this audio player.
	 */
	// NOTE(review): presumably invoked via bracket access from the DataStore timer — confirm.
	// @ts-ignore
	private _stepDispatch() {
		const state = this._state;
		// Guard against the Idle state
		if (state.status === AudioPlayerStatus.Idle || state.status === AudioPlayerStatus.Buffering) return;
		// Dispatch any audio packets that were prepared in the previous cycle
		this.playable.forEach((connection) => connection.dispatchAudio());
	}

	/**
	 * Called roughly every 20ms by the global audio player timer. Attempts to read an audio packet from the
	 * underlying resource of the stream, and then has all the active connections of the audio player prepare it
	 * (encrypt it, append header data) so that it is ready to play at the start of the next cycle.
	 */
	// NOTE(review): presumably invoked via bracket access from the DataStore timer — confirm.
	// @ts-ignore
	private _stepPrepare() {
		const state = this._state;
		// Guard against the Idle state
		if (state.status === AudioPlayerStatus.Idle || state.status === AudioPlayerStatus.Buffering) return;
		// List of connections that can receive the packet
		const playable = this.playable;
		/* If the player was previously in the AutoPaused state, check to see whether there are newly available
		connections, allowing us to transition out of the AutoPaused state back into the Playing state */
		if (state.status === AudioPlayerStatus.AutoPaused && playable.length > 0) {
			this.state = {
				...state,
				status: AudioPlayerStatus.Playing,
				missedFrames: 0,
			};
		}
		/* If the player is (auto)paused, check to see whether silence packets should be played and
		set a timeout to begin the next cycle, ending the current cycle here. */
		// NOTE(review): `state` was captured before the AutoPaused -> Playing transition above, so on the
		// cycle where the player auto-resumes this branch still sees AutoPaused and returns early; real
		// playback resumes on the next cycle. Confirm this one-cycle delay is intended.
		if (state.status === AudioPlayerStatus.Paused || state.status === AudioPlayerStatus.AutoPaused) {
			if (state.silencePacketsRemaining > 0) {
				state.silencePacketsRemaining--;
				this._preparePacket(SILENCE_FRAME, playable, state);
				if (state.silencePacketsRemaining === 0) {
					this._signalStopSpeaking();
				}
			}
			return;
		}
		// If there are no available connections in this cycle, observe the configured "no subscriber" behavior.
		if (playable.length === 0) {
			if (this.behaviors.noSubscriber === NoSubscriberBehavior.Pause) {
				this.state = {
					...state,
					status: AudioPlayerStatus.AutoPaused,
					silencePacketsRemaining: 5,
				};
				return;
			} else if (this.behaviors.noSubscriber === NoSubscriberBehavior.Stop) {
				this.stop(true);
			}
		}
		/**
		 * Attempt to read an Opus packet from the resource. If there isn't an available packet,
		 * play a silence packet. If there are 5 consecutive cycles with failed reads, then the
		 * playback will end.
		 */
		const packet: Buffer | null = state.resource.read();
		// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
		if (state.status === AudioPlayerStatus.Playing) {
			if (packet) {
				this._preparePacket(packet, playable, state);
				state.missedFrames = 0;
			} else {
				// No packet available — pad with silence and track the miss.
				this._preparePacket(SILENCE_FRAME, playable, state);
				state.missedFrames++;
				if (state.missedFrames >= this.behaviors.maxMissedFrames) {
					this.stop();
				}
			}
		}
	}

	/**
	 * Signals to all the subscribed connections that they should send a packet to Discord indicating
	 * they are no longer speaking. Called once playback of a resource ends.
	 */
	private _signalStopSpeaking() {
		return this.subscribers.forEach(({ connection }) => connection.setSpeaking(false));
	}

	/**
	 * Instructs the given connections to each prepare this packet to be played at the start of the
	 * next cycle.
	 *
	 * @param packet - The Opus packet to be prepared by each receiver
	 * @param receivers - The connections that should play this packet
	 * @param state - The Playing/Paused state whose playback duration should advance by one 20ms frame
	 */
	private _preparePacket(
		packet: Buffer,
		receivers: VoiceConnection[],
		state: AudioPlayerPlayingState | AudioPlayerPausedState,
	) {
		state.playbackDuration += 20;
		receivers.forEach((connection) => connection.prepareAudioPacket(packet));
	}
}
/**
 * Produces a compact JSON description of an AudioPlayerState, substituting booleans
 * for object-valued fields (so large objects are not serialized) — used for debug logs.
 *
 * @param state - The state to stringify
 */
function stringifyState(state: AudioPlayerState) {
	const summary = {
		...state,
		resource: 'resource' in state,
		stepTimeout: 'stepTimeout' in state,
	};
	return JSON.stringify(summary);
}
/**
 * Creates a new AudioPlayer to be used.
 *
 * @param options - Configurable behaviors for the new player
 */
export function createAudioPlayer(options?: CreateAudioPlayerOptions) {
	const player = new AudioPlayer(options);
	return player;
}

View File

@@ -0,0 +1,18 @@
import type { AudioResource } from './AudioResource';
/**
 * An error emitted by an AudioPlayer. Wraps the original error and carries the audio
 * resource that was active when the error occurred, to aid with debugging and
 * identifying where the error came from.
 */
export class AudioPlayerError extends Error {
	public constructor(
		error: Error,
		/**
		 * The resource associated with the audio player at the time the error was thrown.
		 */
		public readonly resource: AudioResource,
	) {
		super(error.message);
		// Mirror the original error's identity so logs point at the real failure.
		this.name = error.name;
		this.stack = error.stack;
	}
}

View File

@@ -0,0 +1,285 @@
import { Edge, findPipeline, StreamType, TransformerType } from './TransformerGraph';
import { pipeline, Readable } from 'node:stream';
import { noop } from '../util/util';
import prism from 'prism-media';
import { AudioPlayer, SILENCE_FRAME } from './AudioPlayer';
/**
* Options that are set when creating a new audio resource.
*
* @template T - the type for the metadata (if any) of the audio resource
*/
export interface CreateAudioResourceOptions<T> {
	/**
	 * The type of the input stream. Defaults to `StreamType.Arbitrary`.
	 */
	inputType?: StreamType;

	/**
	 * Optional metadata that can be attached to the resource (e.g. track title, random id).
	 * This is useful for identification purposes when the resource is passed around in events.
	 * See {@link AudioResource.metadata}
	 */
	metadata?: T;

	/**
	 * Whether or not inline volume should be enabled. If enabled, you will be able to change the volume
	 * of the stream on-the-fly. However, this also increases the performance cost of playback. Defaults to `false`.
	 */
	inlineVolume?: boolean;

	/**
	 * The number of silence frames to append to the end of the resource's audio stream, to prevent interpolation glitches.
	 * Defaults to 5.
	 */
	silencePaddingFrames?: number;
}
/**
* Represents an audio resource that can be played by an audio player.
*
* @template T - the type for the metadata (if any) of the audio resource
*/
export class AudioResource<T = unknown> {
	/**
	 * An object-mode Readable stream that emits Opus packets. This is what is played by audio players.
	 */
	public readonly playStream: Readable;

	/**
	 * The pipeline used to convert the input stream into a playable format. For example, this may
	 * contain an FFmpeg component for arbitrary inputs, and it may contain a VolumeTransformer component
	 * for resources with inline volume transformation enabled.
	 */
	public readonly edges: readonly Edge[];

	/**
	 * Optional metadata that can be used to identify the resource.
	 */
	public metadata: T;

	/**
	 * If the resource was created with inline volume transformation enabled, then this will be a
	 * prism-media VolumeTransformer. You can use this to alter the volume of the stream.
	 */
	public readonly volume?: prism.VolumeTransformer;

	/**
	 * If using an Opus encoder to create this audio resource, then this will be a prism-media opus.Encoder.
	 * You can use this to control settings such as bitrate, FEC, PLP.
	 */
	public readonly encoder?: prism.opus.Encoder;

	/**
	 * The audio player that the resource is subscribed to, if any.
	 */
	public audioPlayer?: AudioPlayer;

	/**
	 * The playback duration of this audio resource, given in milliseconds.
	 */
	public playbackDuration = 0;

	/**
	 * Whether or not the stream for this resource has started (data has become readable)
	 */
	public started = false;

	/**
	 * The number of silence frames to append to the end of the resource's audio stream, to prevent interpolation glitches.
	 */
	public readonly silencePaddingFrames: number;

	/**
	 * The number of remaining silence frames to play. If -1, the frames have not yet started playing.
	 */
	public silenceRemaining = -1;

	/**
	 * Creates a new AudioResource over a chain of streams.
	 *
	 * @param edges - The graph edges used to build the stream chain (kept for introspection)
	 * @param streams - The ordered streams; the last one yields the playable packets
	 * @param metadata - User-supplied identification data
	 * @param silencePaddingFrames - Silence frames to append once the stream ends
	 */
	public constructor(edges: readonly Edge[], streams: readonly Readable[], metadata: T, silencePaddingFrames: number) {
		this.edges = edges;
		// With multiple streams, chain them with pipeline() so errors/teardown propagate;
		// pipeline() returns the last stream in the chain, which is what gets read from.
		this.playStream = streams.length > 1 ? (pipeline(streams, noop) as any as Readable) : streams[0];
		this.metadata = metadata;
		this.silencePaddingFrames = silencePaddingFrames;
		// Capture references to components callers may want to tweak later (volume, encoder).
		for (const stream of streams) {
			if (stream instanceof prism.VolumeTransformer) {
				this.volume = stream;
			} else if (stream instanceof prism.opus.Encoder) {
				this.encoder = stream;
			}
		}
		this.playStream.once('readable', () => (this.started = true));
	}

	/**
	 * Whether this resource is readable. If the underlying resource is no longer readable, this will still return true
	 * while there are silence padding frames left to play.
	 */
	public get readable() {
		// All padding silence has been played — the resource is exhausted.
		if (this.silenceRemaining === 0) return false;
		const real = this.playStream.readable;
		if (!real) {
			// Underlying stream just became unreadable: start the silence padding countdown.
			if (this.silenceRemaining === -1) this.silenceRemaining = this.silencePaddingFrames;
			return this.silenceRemaining !== 0;
		}
		return real;
	}

	/**
	 * Whether this resource has ended or not.
	 */
	public get ended() {
		return this.playStream.readableEnded || this.playStream.destroyed || this.silenceRemaining === 0;
	}

	/**
	 * Attempts to read an Opus packet from the audio resource. If a packet is available, the playbackDuration
	 * is incremented.
	 *
	 * @remarks
	 * It is advisable to check that the playStream is readable before calling this method. While no runtime
	 * errors will be thrown, you should check that the resource is still available before attempting to
	 * read from it.
	 *
	 * @internal
	 */
	public read(): Buffer | null {
		if (this.silenceRemaining === 0) {
			return null;
		} else if (this.silenceRemaining > 0) {
			// Serve a padding silence frame instead of real audio.
			this.silenceRemaining--;
			return SILENCE_FRAME;
		}
		const packet: Buffer | null = this.playStream.read();
		if (packet) {
			// Each packet accounts for 20ms of audio (see the player's 20ms cycle).
			this.playbackDuration += 20;
		}
		return packet;
	}
}
/**
* Ensures that a path contains at least one volume transforming component.
*
* @param path - The path to validate constraints on
*/
export const VOLUME_CONSTRAINT = (path: Edge[]) => path.some((edge) => edge.type === TransformerType.InlineVolume);

// Accepts any path — used when no inline volume component is required.
export const NO_CONSTRAINT = () => true;
/**
 * Tries to infer the type of a stream to aid with transcoder pipelining, by checking
 * the stream instance against known prism-media component classes.
 *
 * @param stream - The stream to infer the type of
 * @returns The inferred stream type, and whether a volume transformer is already present
 */
export function inferStreamType(stream: Readable): {
	streamType: StreamType;
	hasVolume: boolean;
} {
	// Encoders emit ready-to-play Opus packets.
	if (stream instanceof prism.opus.Encoder) {
		return { streamType: StreamType.Opus, hasVolume: false };
	}
	// Decoders emit raw s16le PCM.
	if (stream instanceof prism.opus.Decoder) {
		return { streamType: StreamType.Raw, hasVolume: false };
	}
	// A volume transformer also emits raw PCM, and means no extra one is needed.
	if (stream instanceof prism.VolumeTransformer) {
		return { streamType: StreamType.Raw, hasVolume: true };
	}
	// Both demuxers extract plain Opus packets from their containers.
	if (stream instanceof prism.opus.OggDemuxer || stream instanceof prism.opus.WebmDemuxer) {
		return { streamType: StreamType.Opus, hasVolume: false };
	}
	// Unknown stream — treat it as arbitrary input (will be probed/transcoded).
	return { streamType: StreamType.Arbitrary, hasVolume: false };
}
/**
* Creates an audio resource that can be played by audio players.
*
* @remarks
* If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
*
* If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
* to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
* Opus transcoders, and Ogg/WebM demuxers.
*
* @param input - The resource to play
* @param options - Configurable options for creating the resource
*
* @template T - the type for the metadata (if any) of the audio resource
*/
export function createAudioResource<T>(
	input: string | Readable,
	options: CreateAudioResourceOptions<T> &
		Pick<
			T extends null | undefined ? CreateAudioResourceOptions<T> : Required<CreateAudioResourceOptions<T>>,
			'metadata'
		>,
): AudioResource<T extends null | undefined ? null : T>;

/**
 * Creates an audio resource that can be played by audio players.
 *
 * @remarks
 * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
 *
 * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
 * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
 * Opus transcoders, and Ogg/WebM demuxers.
 *
 * @param input - The resource to play
 * @param options - Configurable options for creating the resource (metadata omitted in this overload)
 *
 * @template T - the type for the metadata (if any) of the audio resource
 */
export function createAudioResource<T extends null | undefined>(
	input: string | Readable,
	options?: Omit<CreateAudioResourceOptions<T>, 'metadata'>,
): AudioResource<null>;

/**
 * Creates an audio resource that can be played by audio players.
 *
 * @remarks
 * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
 *
 * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
 * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
 * Opus transcoders, and Ogg/WebM demuxers.
 *
 * @param input - The resource to play
 * @param options - Configurable options for creating the resource
 *
 * @template T - the type for the metadata (if any) of the audio resource
 */
export function createAudioResource<T>(
	input: string | Readable,
	options: CreateAudioResourceOptions<T> = {},
): AudioResource<T> {
	let inputType = options.inputType;
	let needsInlineVolume = Boolean(options.inlineVolume);
	// string inputs can only be used with FFmpeg
	if (typeof input === 'string') {
		inputType = StreamType.Arbitrary;
	} else if (typeof inputType === 'undefined') {
		// No explicit type given — infer it from the stream instance. If the stream
		// already contains a volume transformer, no extra inline volume edge is needed.
		const analysis = inferStreamType(input);
		inputType = analysis.streamType;
		needsInlineVolume = needsInlineVolume && !analysis.hasVolume;
	}
	// Find the cheapest chain of transformers from the input type to a playable stream.
	const transformerPipeline = findPipeline(inputType, needsInlineVolume ? VOLUME_CONSTRAINT : NO_CONSTRAINT);
	if (transformerPipeline.length === 0) {
		// A string input always needs at least an FFmpeg edge; an empty pipeline here is a bug.
		if (typeof input === 'string') throw new Error(`Invalid pipeline constructed for string resource '${input}'`);
		// No adjustments required
		return new AudioResource<T>([], [input], (options.metadata ?? null) as T, options.silencePaddingFrames ?? 5);
	}
	const streams = transformerPipeline.map((edge) => edge.transformer(input));
	// Stream inputs are the head of the chain; string inputs are consumed by FFmpeg directly.
	if (typeof input !== 'string') streams.unshift(input);
	return new AudioResource<T>(
		transformerPipeline,
		streams,
		(options.metadata ?? null) as T,
		options.silencePaddingFrames ?? 5,
	);
}

View File

@@ -0,0 +1,33 @@
/* eslint-disable @typescript-eslint/dot-notation */
import type { VoiceConnection } from '../VoiceConnection';
import type { AudioPlayer } from './AudioPlayer';
/**
* Represents a subscription of a voice connection to an audio player, allowing
* the audio player to play audio on the voice connection.
*/
export class PlayerSubscription {
	public constructor(
		/**
		 * The voice connection of this subscription.
		 */
		public readonly connection: VoiceConnection,
		/**
		 * The audio player of this subscription.
		 */
		public readonly player: AudioPlayer,
	) {}

	/**
	 * Unsubscribes the connection from the audio player, meaning that the
	 * audio player cannot stream audio to it until a new subscription is made.
	 */
	public unsubscribe() {
		// Both sides expose these as private members; bracket access is the sanctioned backdoor.
		this.connection['onSubscriptionRemoved'](this);
		this.player['unsubscribe'](this);
	}
}

View File

@@ -0,0 +1,264 @@
import type { Readable } from 'node:stream';
import prism from 'prism-media';
/**
* This module creates a Transformer Graph to figure out what the most efficient way
* of transforming the input stream into something playable would be.
*/
// FFmpeg flags shared by PCM output: skip input analysis delay, silence logging,
// and emit raw s16le PCM at 48kHz stereo.
const FFMPEG_PCM_ARGUMENTS = ['-analyzeduration', '0', '-loglevel', '0', '-f', 's16le', '-ar', '48000', '-ac', '2'];
// Same probing/logging flags, but encode with libopus into the 'opus' output format
// (Ogg Opus) at 48kHz stereo.
const FFMPEG_OPUS_ARGUMENTS = [
	'-analyzeduration',
	'0',
	'-loglevel',
	'0',
	'-acodec',
	'libopus',
	'-f',
	'opus',
	'-ar',
	'48000',
	'-ac',
	'2',
];
/**
* The different types of stream that can exist within the pipeline.
*
* @remarks
* - `Arbitrary` - the type of the stream at this point is unknown.
* - `Raw` - the stream at this point is s16le PCM.
* - `OggOpus` - the stream at this point is Opus audio encoded in an Ogg wrapper.
* - `WebmOpus` - the stream at this point is Opus audio encoded in a WebM wrapper.
* - `Opus` - the stream at this point is Opus audio, and the stream is in object-mode. This is ready to play.
*/
export enum StreamType {
	// Type unknown — must be probed/transcoded (via FFmpeg).
	Arbitrary = 'arbitrary',
	// s16le PCM.
	Raw = 'raw',
	// Opus audio in an Ogg wrapper.
	OggOpus = 'ogg/opus',
	// Opus audio in a WebM wrapper.
	WebmOpus = 'webm/opus',
	// Object-mode stream of Opus packets — ready to play.
	Opus = 'opus',
}
/**
* The different types of transformers that can exist within the pipeline.
*/
export enum TransformerType {
	// FFmpeg decoding an arbitrary input to raw s16le PCM.
	FFmpegPCM = 'ffmpeg pcm',
	// FFmpeg encoding to Opus in an Ogg container.
	FFmpegOgg = 'ffmpeg ogg',
	// prism-media Opus encoder (raw PCM -> Opus).
	OpusEncoder = 'opus encoder',
	// prism-media Opus decoder (Opus -> raw PCM).
	OpusDecoder = 'opus decoder',
	// Extracts Opus packets from an Ogg container.
	OggOpusDemuxer = 'ogg/opus demuxer',
	// Extracts Opus packets from a WebM container.
	WebmOpusDemuxer = 'webm/opus demuxer',
	// Applies on-the-fly volume transformation to raw PCM.
	InlineVolume = 'volume transformer',
}
/**
* Represents a pathway from one stream type to another using a transformer.
*/
export interface Edge {
	// The node (stream type) this edge starts from.
	from: Node;
	// The node (stream type) this edge produces.
	to: Node;
	// Relative expense of applying this transformer — presumably used by findPipeline to
	// rank candidate pipelines; confirm against findPipeline's implementation.
	cost: number;
	// Creates the stream performing the transformation. String inputs are only meaningful
	// for FFmpeg edges (they become the '-i' argument).
	transformer: (input: string | Readable) => Readable;
	// The kind of transformer this edge applies.
	type: TransformerType;
}
/**
* Represents a type of stream within the graph, e.g. an Opus stream, or a stream of raw audio.
*/
export class Node {
	/**
	 * The outbound edges from this node.
	 */
	public readonly edges: Edge[] = [];

	public constructor(
		/**
		 * The type of stream for this node.
		 */
		public readonly type: StreamType,
	) {}

	/**
	 * Creates an outbound edge from this node.
	 *
	 * @param edge - The edge to create, without its `from` side (which is always this node)
	 */
	public addEdge(edge: Omit<Edge, 'from'>) {
		const completed = { ...edge, from: this };
		this.edges.push(completed);
	}
}
// Build the graph: one node for each known stream type.
const NODES = new Map<StreamType, Node>();
Object.values(StreamType).forEach((streamType) => NODES.set(streamType, new Node(streamType)));
/**
 * Gets a node from its stream type.
 *
 * @param type - The stream type of the target node
 * @throws When no node has been registered for the given stream type
 */
export function getNode(type: StreamType) {
  const node = NODES.get(type);
  if (node === undefined) throw new Error(`Node type '${type}' does not exist!`);
  return node;
}
// Raw PCM <-> Opus conversions via prism's Opus codec (48kHz, stereo, 960-sample/20ms frames).
getNode(StreamType.Raw).addEdge({
  type: TransformerType.OpusEncoder,
  to: getNode(StreamType.Opus),
  cost: 1.5,
  transformer: () => new prism.opus.Encoder({ rate: 48000, channels: 2, frameSize: 960 }),
});
getNode(StreamType.Opus).addEdge({
  type: TransformerType.OpusDecoder,
  to: getNode(StreamType.Raw),
  cost: 1.5,
  transformer: () => new prism.opus.Decoder({ rate: 48000, channels: 2, frameSize: 960 }),
});
// Demuxing a container is cheaper (cost 1) than transcoding, so it is preferred when possible.
getNode(StreamType.OggOpus).addEdge({
  type: TransformerType.OggOpusDemuxer,
  to: getNode(StreamType.Opus),
  cost: 1,
  transformer: () => new prism.opus.OggDemuxer(),
});
getNode(StreamType.WebmOpus).addEdge({
  type: TransformerType.WebmOpusDemuxer,
  to: getNode(StreamType.Opus),
  cost: 1,
  transformer: () => new prism.opus.WebmDemuxer(),
});
// FFmpeg fallback: decode anything to raw PCM. String inputs become FFmpeg's `-i` argument.
const FFMPEG_PCM_EDGE: Omit<Edge, 'from'> = {
  type: TransformerType.FFmpegPCM,
  to: getNode(StreamType.Raw),
  cost: 2,
  transformer: (input) =>
    new prism.FFmpeg({
      args: typeof input === 'string' ? ['-i', input, ...FFMPEG_PCM_ARGUMENTS] : FFMPEG_PCM_ARGUMENTS,
    }),
};
getNode(StreamType.Arbitrary).addEdge(FFMPEG_PCM_EDGE);
getNode(StreamType.OggOpus).addEdge(FFMPEG_PCM_EDGE);
getNode(StreamType.WebmOpus).addEdge(FFMPEG_PCM_EDGE);
// Inline volume transformation is the cheapest edge (0.5) and keeps the stream as raw PCM.
getNode(StreamType.Raw).addEdge({
  type: TransformerType.InlineVolume,
  to: getNode(StreamType.Raw),
  cost: 0.5,
  transformer: () => new prism.VolumeTransformer({ type: 's16le' }),
});
// Try to enable FFmpeg Ogg optimizations
function canEnableFFmpegOptimizations(): boolean {
  // Probing FFmpeg may throw (e.g. no binary on the PATH) — treat that as "not available".
  try {
    return prism.FFmpeg.getInfo().output.includes('--enable-libopus');
  } catch {
    return false;
  }
}
// When FFmpeg was built with libopus, register edges that transcode straight to Ogg Opus,
// skipping the separate PCM decode + Opus encode steps.
if (canEnableFFmpegOptimizations()) {
  const FFMPEG_OGG_EDGE: Omit<Edge, 'from'> = {
    type: TransformerType.FFmpegOgg,
    to: getNode(StreamType.OggOpus),
    cost: 2,
    transformer: (input) =>
      new prism.FFmpeg({
        args: typeof input === 'string' ? ['-i', input, ...FFMPEG_OPUS_ARGUMENTS] : FFMPEG_OPUS_ARGUMENTS,
      }),
  };
  getNode(StreamType.Arbitrary).addEdge(FFMPEG_OGG_EDGE);
  // Include Ogg and WebM as well in case they have different sampling rates or are mono instead of stereo
  // at the moment, this will not do anything. However, if/when detection for correct Opus headers is
  // implemented, this will help inform the voice engine that it is able to transcode the audio.
  getNode(StreamType.OggOpus).addEdge(FFMPEG_OGG_EDGE);
  getNode(StreamType.WebmOpus).addEdge(FFMPEG_OGG_EDGE);
}
/**
 * Represents a step in the path from node A to node B.
 */
interface Step {
  /**
   * The next step, if any. Undefined once the goal has been reached.
   */
  next?: Step;
  /**
   * The total cost of this step's edge plus all subsequent steps.
   * Infinity when the goal is unreachable from here.
   */
  cost: number;
  /**
   * The edge associated with this step. Undefined for the terminal (goal) step.
   */
  edge?: Edge;
}
/**
 * Performs a depth-limited search for the cheapest sequence of edges leading from
 * the start node to the goal node while satisfying the given constraints.
 *
 * @param from - The start node
 * @param constraints - Extra validation for a potential solution. Takes a path, returns true if the path is valid
 * @param goal - The target node
 * @param path - The running path
 * @param depth - The number of remaining recursions
 */
function findPath(
  from: Node,
  constraints: (path: Edge[]) => boolean,
  goal = getNode(StreamType.Opus),
  path: Edge[] = [],
  depth = 5,
): Step {
  // Reached the goal with an acceptable path — nothing further to pay.
  if (from === goal && constraints(path)) {
    return { cost: 0 };
  }
  // Recursion budget exhausted — treat this branch as a dead end.
  if (depth === 0) {
    return { cost: Infinity };
  }
  let best: Step | undefined;
  for (const edge of from.edges) {
    // Prune: this edge alone already costs more than the best complete path found so far.
    if (best && edge.cost > best.cost) continue;
    const tail = findPath(edge.to, constraints, goal, [...path, edge], depth - 1);
    const total = edge.cost + tail.cost;
    if (best === undefined || total < best.cost) {
      best = { cost: total, edge, next: tail };
    }
  }
  return best ?? { cost: Infinity };
}
/**
 * Takes the solution from findPath and assembles it into a list of edges.
 *
 * @param step - The first step of the path
 */
function constructPipeline(step: Step) {
  const edges = [];
  // Walk the linked list of steps, collecting each traversed edge in order.
  for (let cursor: Step | undefined = step; cursor?.edge; cursor = cursor.next) {
    edges.push(cursor.edge);
  }
  return edges;
}
/**
 * Finds the lowest-cost pipeline to convert the input stream type into an Opus stream.
 *
 * @param from - The stream type to start from
 * @param constraint - Extra constraints that may be imposed on potential solution
 */
export function findPipeline(from: StreamType, constraint: (path: Edge[]) => boolean) {
  const solution = findPath(getNode(from), constraint);
  return constructPipeline(solution);
}

View File

@@ -0,0 +1,390 @@
/* eslint-disable @typescript-eslint/dot-notation */
import { AudioResource } from '../../audio/AudioResource';
import { createAudioPlayer, AudioPlayerStatus, AudioPlayer, SILENCE_FRAME } from '../AudioPlayer';
import { Readable } from 'node:stream';
import { addAudioPlayer, deleteAudioPlayer } from '../../DataStore';
import { NoSubscriberBehavior } from '../..';
import { VoiceConnection, VoiceConnectionStatus } from '../../VoiceConnection';
import { once } from 'node:events';
import { AudioPlayerError } from '../AudioPlayerError';
// Auto-mock collaborators so the player can be driven without real networking or timers.
jest.mock('../../DataStore');
jest.mock('../../VoiceConnection');
jest.mock('../AudioPlayerError');
// Typed handles onto the auto-mocked functions/classes, for call-count assertions.
const addAudioPlayerMock = addAudioPlayer as unknown as jest.Mock<typeof addAudioPlayer>;
const deleteAudioPlayerMock = deleteAudioPlayer as unknown as jest.Mock<typeof deleteAudioPlayer>;
const AudioPlayerErrorMock = AudioPlayerError as unknown as jest.Mock<typeof AudioPlayerError>;
const VoiceConnectionMock = VoiceConnection as unknown as jest.Mock<VoiceConnection>;
// Infinite generator of Opus "silence" frames; a fresh Buffer is yielded each iteration.
function* silence() {
  for (;;) {
    yield Buffer.from([0xf8, 0xff, 0xfe]);
  }
}
// Builds a mocked VoiceConnection in the Signalling state whose subscribe() call is
// wired through to the player's private subscribe method.
function createVoiceConnectionMock() {
  const mockedConnection = new VoiceConnectionMock();
  mockedConnection.state = {
    status: VoiceConnectionStatus.Signalling,
    adapter: {
      sendPayload: jest.fn(),
      destroy: jest.fn(),
    },
  };
  mockedConnection.subscribe = jest.fn((player) => player['subscribe'](mockedConnection));
  return mockedConnection;
}
// Yields control until the next tick of the event loop.
function wait() {
  return new Promise((resolve) => {
    process.nextTick(resolve);
  });
}
// Resolves with the resource once it reports that playback has started, polling once per tick.
async function started(resource: AudioResource) {
  for (;;) {
    if (resource.started) return resource;
    await wait();
  }
}
// The player under test; torn down after each test so streams and intervals are released.
let player: AudioPlayer | undefined;
beforeEach(() => {
  // Reset call counts so per-test assertions on mock usage start from zero.
  AudioPlayerErrorMock.mockReset();
  VoiceConnectionMock.mockReset();
  addAudioPlayerMock.mockReset();
  deleteAudioPlayerMock.mockReset();
});
afterEach(() => {
  // Force-stop to detach the resource even if silence padding frames remain.
  player?.stop(true);
});
// Exercises the AudioPlayer state machine: Idle/Buffering/Playing/Paused/AutoPaused
// transitions, subscriber behaviors, packet dispatch cycles, and silence padding on stop().
describe('State transitions', () => {
  test('Starts in Idle state', () => {
    player = createAudioPlayer();
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    expect(addAudioPlayerMock).toBeCalledTimes(0);
    expect(deleteAudioPlayerMock).toBeCalledTimes(0);
  });
  test('Playing resource with pausing and resuming', async () => {
    // Call AudioResource constructor directly to avoid analysing pipeline for stream
    const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
    player = createAudioPlayer();
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    // Pause and unpause should not affect the status of an Idle player
    expect(player.pause()).toBe(false);
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    expect(player.unpause()).toBe(false);
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    expect(addAudioPlayerMock).toBeCalledTimes(0);
    player.play(resource);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    expect(addAudioPlayerMock).toBeCalledTimes(1);
    // Expect pause() to return true and transition to paused state
    expect(player.pause()).toBe(true);
    expect(player.state.status).toBe(AudioPlayerStatus.Paused);
    // further calls to pause() should be unsuccessful
    expect(player.pause()).toBe(false);
    expect(player.state.status).toBe(AudioPlayerStatus.Paused);
    // unpause() should transition back to Playing
    expect(player.unpause()).toBe(true);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    // further calls to unpause() should be unsuccessful
    expect(player.unpause()).toBe(false);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    // The audio player should not have been deleted throughout these changes
    expect(deleteAudioPlayerMock).toBeCalledTimes(0);
  });
  test('Playing to Stopping', async () => {
    const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
    player = createAudioPlayer();
    // stop() shouldn't do anything in Idle state
    expect(player.stop(true)).toBe(false);
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    player.play(resource);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    expect(addAudioPlayerMock).toBeCalledTimes(1);
    expect(deleteAudioPlayerMock).toBeCalledTimes(0);
    // A non-forced stop() keeps the player Playing while silence padding drains
    expect(player.stop()).toBe(true);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    expect(addAudioPlayerMock).toBeCalledTimes(1);
    expect(deleteAudioPlayerMock).toBeCalledTimes(0);
    expect(resource.silenceRemaining).toBe(5);
  });
  test('Buffering to Playing', async () => {
    const resource = new AudioResource([], [Readable.from(silence())], null, 5);
    player = createAudioPlayer();
    player.play(resource);
    expect(player.state.status).toBe(AudioPlayerStatus.Buffering);
    await started(resource);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    expect(addAudioPlayerMock).toHaveBeenCalled();
    expect(deleteAudioPlayerMock).not.toHaveBeenCalled();
  });
  describe('NoSubscriberBehavior transitions', () => {
    test('NoSubscriberBehavior.Pause', async () => {
      const connection = createVoiceConnectionMock();
      if (connection.state.status !== VoiceConnectionStatus.Signalling) {
        throw new Error('Voice connection should have been Signalling');
      }
      const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
      player = createAudioPlayer({ behaviors: { noSubscriber: NoSubscriberBehavior.Pause } });
      connection.subscribe(player);
      player.play(resource);
      expect(player.checkPlayable()).toBe(true);
      // No Ready subscriber yet, so the Pause behavior should auto-pause the player
      player['_stepPrepare']();
      expect(player.state.status).toBe(AudioPlayerStatus.AutoPaused);
      connection.state = {
        ...connection.state,
        status: VoiceConnectionStatus.Ready,
        networking: null as any,
      };
      expect(player.checkPlayable()).toBe(true);
      player['_stepPrepare']();
      expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    });
    test('NoSubscriberBehavior.Play', async () => {
      const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
      player = createAudioPlayer({ behaviors: { noSubscriber: NoSubscriberBehavior.Play } });
      player.play(resource);
      expect(player.checkPlayable()).toBe(true);
      player['_stepPrepare']();
      expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    });
    test('NoSubscriberBehavior.Stop', async () => {
      const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
      player = createAudioPlayer({ behaviors: { noSubscriber: NoSubscriberBehavior.Stop } });
      player.play(resource);
      expect(addAudioPlayerMock).toBeCalledTimes(1);
      expect(player.checkPlayable()).toBe(true);
      player['_stepPrepare']();
      expect(player.state.status).toBe(AudioPlayerStatus.Idle);
      expect(deleteAudioPlayerMock).toBeCalledTimes(1);
    });
  });
  test('Normal playing state', async () => {
    const connection = createVoiceConnectionMock();
    if (connection.state.status !== VoiceConnectionStatus.Signalling) {
      throw new Error('Voice connection should have been Signalling');
    }
    connection.state = {
      ...connection.state,
      status: VoiceConnectionStatus.Ready,
      networking: null as any,
    };
    const buffer = Buffer.from([1, 2, 4, 8]);
    const resource = await started(
      new AudioResource([], [Readable.from([buffer, buffer, buffer, buffer, buffer])], null, 5),
    );
    player = createAudioPlayer();
    connection.subscribe(player);
    player.play(resource);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    expect(addAudioPlayerMock).toBeCalledTimes(1);
    expect(player.checkPlayable()).toBe(true);
    // Run through a few packet cycles
    for (let i = 1; i <= 5; i++) {
      player['_stepDispatch']();
      expect(connection.dispatchAudio).toHaveBeenCalledTimes(i);
      await wait(); // Wait for the stream
      player['_stepPrepare']();
      expect(connection.prepareAudioPacket).toHaveBeenCalledTimes(i);
      expect(connection.prepareAudioPacket).toHaveBeenLastCalledWith(buffer);
      expect(player.state.status).toBe(AudioPlayerStatus.Playing);
      if (player.state.status === AudioPlayerStatus.Playing) {
        // Each cycle advances playback by one 20ms frame
        expect(player.state.playbackDuration).toStrictEqual(i * 20);
      }
    }
    // Expect silence to be played
    player['_stepDispatch']();
    expect(connection.dispatchAudio).toHaveBeenCalledTimes(6);
    await wait();
    player['_stepPrepare']();
    const prepareAudioPacket = connection.prepareAudioPacket as unknown as jest.Mock<
      typeof connection.prepareAudioPacket
    >;
    expect(prepareAudioPacket).toHaveBeenCalledTimes(6);
    expect(prepareAudioPacket.mock.calls[5][0]).toEqual(silence().next().value);
    player.stop(true);
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    expect(connection.setSpeaking).toBeCalledTimes(1);
    expect(connection.setSpeaking).toHaveBeenLastCalledWith(false);
    expect(deleteAudioPlayerMock).toHaveBeenCalledTimes(1);
  });
  test('stop() causes resource to use silence padding frames', async () => {
    const connection = createVoiceConnectionMock();
    if (connection.state.status !== VoiceConnectionStatus.Signalling) {
      throw new Error('Voice connection should have been Signalling');
    }
    connection.state = {
      ...connection.state,
      status: VoiceConnectionStatus.Ready,
      networking: null as any,
    };
    const buffer = Buffer.from([1, 2, 4, 8]);
    const resource = await started(
      new AudioResource([], [Readable.from([buffer, buffer, buffer, buffer, buffer])], null, 5),
    );
    player = createAudioPlayer();
    connection.subscribe(player);
    player.play(resource);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    expect(addAudioPlayerMock).toBeCalledTimes(1);
    expect(player.checkPlayable()).toBe(true);
    player.stop();
    // Run through a few packet cycles
    for (let i = 1; i <= 5; i++) {
      player['_stepDispatch']();
      expect(connection.dispatchAudio).toHaveBeenCalledTimes(i);
      await wait(); // Wait for the stream
      player['_stepPrepare']();
      expect(connection.prepareAudioPacket).toHaveBeenCalledTimes(i);
      // After stop(), only the SILENCE_FRAME padding should be sent
      expect(connection.prepareAudioPacket).toHaveBeenLastCalledWith(SILENCE_FRAME);
      expect(player.state.status).toBe(AudioPlayerStatus.Playing);
      if (player.state.status === AudioPlayerStatus.Playing) {
        expect(player.state.playbackDuration).toStrictEqual(i * 20);
      }
    }
    await wait();
    expect(player.checkPlayable()).toBe(false);
    const prepareAudioPacket = connection.prepareAudioPacket as unknown as jest.Mock<
      typeof connection.prepareAudioPacket
    >;
    expect(prepareAudioPacket).toHaveBeenCalledTimes(5);
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    expect(connection.setSpeaking).toBeCalledTimes(1);
    expect(connection.setSpeaking).toHaveBeenLastCalledWith(false);
    expect(deleteAudioPlayerMock).toHaveBeenCalledTimes(1);
  });
  test('Plays silence 5 times for unreadable stream before quitting', async () => {
    const connection = createVoiceConnectionMock();
    if (connection.state.status !== VoiceConnectionStatus.Signalling) {
      throw new Error('Voice connection should have been Signalling');
    }
    connection.state = {
      ...connection.state,
      status: VoiceConnectionStatus.Ready,
      networking: null as any,
    };
    const resource = await started(new AudioResource([], [Readable.from([1])], null, 0));
    // Drain the only chunk so the stream becomes unreadable
    resource.playStream.read();
    player = createAudioPlayer({ behaviors: { maxMissedFrames: 5 } });
    connection.subscribe(player);
    player.play(resource);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    expect(addAudioPlayerMock).toBeCalledTimes(1);
    expect(player.checkPlayable()).toBe(true);
    const prepareAudioPacket = connection.prepareAudioPacket as unknown as jest.Mock<
      typeof connection.prepareAudioPacket
    >;
    // Run through a few packet cycles
    for (let i = 1; i <= 5; i++) {
      expect(player.state.status).toBe(AudioPlayerStatus.Playing);
      if (player.state.status !== AudioPlayerStatus.Playing) throw new Error('Error');
      expect(player.state.playbackDuration).toStrictEqual((i - 1) * 20);
      expect(player.state.missedFrames).toBe(i - 1);
      player['_stepDispatch']();
      expect(connection.dispatchAudio).toHaveBeenCalledTimes(i);
      player['_stepPrepare']();
      expect(prepareAudioPacket).toHaveBeenCalledTimes(i);
      expect(prepareAudioPacket.mock.calls[i - 1][0]).toEqual(silence().next().value);
    }
    // After maxMissedFrames missed frames the player should give up and go Idle
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
    expect(connection.setSpeaking).toBeCalledTimes(1);
    expect(connection.setSpeaking).toHaveBeenLastCalledWith(false);
    expect(deleteAudioPlayerMock).toHaveBeenCalledTimes(1);
  });
  test('checkPlayable() transitions to Idle for unreadable stream', async () => {
    const resource = await started(new AudioResource([], [Readable.from([1])], null, 0));
    player = createAudioPlayer();
    player.play(resource);
    expect(player.checkPlayable()).toBe(true);
    expect(player.state.status).toBe(AudioPlayerStatus.Playing);
    // Drain the stream until it ends
    for (let i = 0; i < 3; i++) {
      resource.playStream.read();
      await wait();
    }
    expect(resource.playStream.readableEnded).toBe(true);
    expect(player.checkPlayable()).toBe(false);
    expect(player.state.status).toBe(AudioPlayerStatus.Idle);
  });
});
// A resource whose underlying stream has already ended cannot be replayed.
test('play() throws when playing a resource that has already ended', async () => {
  const resource = await started(new AudioResource([], [Readable.from([1])], null, 5));
  player = createAudioPlayer();
  player.play(resource);
  expect(player.state.status).toBe(AudioPlayerStatus.Playing);
  // Drain the stream until it ends
  for (let i = 0; i < 3; i++) {
    resource.playStream.read();
    await wait();
  }
  expect(resource.playStream.readableEnded).toBe(true);
  player.stop(true);
  expect(player.state.status).toBe(AudioPlayerStatus.Idle);
  expect(() => player.play(resource)).toThrow();
});
// Errors emitted by the underlying stream should surface as AudioPlayerError 'error' events
// and move the player back to Idle.
test('Propagates errors from streams', async () => {
  const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
  player = createAudioPlayer();
  player.play(resource);
  expect(player.state.status).toBe(AudioPlayerStatus.Playing);
  const error = new Error('AudioPlayer test error');
  process.nextTick(() => resource.playStream.emit('error', error));
  const res = await once(player, 'error');
  const playerError = res[0] as AudioPlayerError;
  expect(playerError).toBeInstanceOf(AudioPlayerError);
  expect(AudioPlayerErrorMock).toHaveBeenCalledWith(error, resource);
  expect(player.state.status).toBe(AudioPlayerStatus.Idle);
});

View File

@@ -0,0 +1,124 @@
import { opus, VolumeTransformer } from 'prism-media';
import { PassThrough, Readable } from 'node:stream';
import { SILENCE_FRAME } from '../AudioPlayer';
import { AudioResource, createAudioResource, NO_CONSTRAINT, VOLUME_CONSTRAINT } from '../AudioResource';
import { Edge, findPipeline as _findPipeline, StreamType, TransformerType } from '../TransformerGraph';
// Mock prism and the transformer graph so no real transcoding pipeline is constructed.
jest.mock('prism-media');
jest.mock('../TransformerGraph');
// Yields control until the next tick of the event loop.
function wait() {
  return new Promise((resolve) => {
    process.nextTick(resolve);
  });
}
// Resolves with the resource once it reports that playback has started, polling once per tick.
async function started(resource: AudioResource) {
  for (;;) {
    if (resource.started) return resource;
    await wait();
  }
}
const findPipeline = _findPipeline as unknown as jest.MockedFunction<typeof _findPipeline>;
beforeAll(() => {
  // Stub the graph search: always return a single PCM step, plus an inline-volume step
  // when the volume constraint is requested.
  findPipeline.mockImplementation((from: StreamType, constraint: (path: Edge[]) => boolean) => {
    const base = [
      {
        cost: 1,
        transformer: () => new PassThrough(),
        type: TransformerType.FFmpegPCM,
      },
    ];
    if (constraint === VOLUME_CONSTRAINT) {
      base.push({
        cost: 1,
        // eslint-disable-next-line @typescript-eslint/no-unsafe-argument
        transformer: () => new VolumeTransformer({} as any),
        type: TransformerType.InlineVolume,
      });
    }
    return base as any[];
  });
});
beforeEach(() => {
  // Clear call history between tests so toHaveBeenCalledWith assertions stay isolated.
  findPipeline.mockClear();
});
// Verifies that createAudioResource infers the correct input stream type for each
// kind of input, applies the volume constraint when requested, and that resources
// append silence padding frames once the underlying stream ends.
describe('createAudioResource', () => {
  test('Creates a resource from string path', () => {
    const resource = createAudioResource('mypath.mp3');
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, NO_CONSTRAINT);
    expect(resource.volume).toBeUndefined();
  });
  test('Creates a resource from string path (volume)', () => {
    const resource = createAudioResource('mypath.mp3', { inlineVolume: true });
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, VOLUME_CONSTRAINT);
    expect(resource.volume).toBeInstanceOf(VolumeTransformer);
  });
  test('Only infers type if not explicitly given', () => {
    const resource = createAudioResource(new opus.Encoder(), { inputType: StreamType.Arbitrary });
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, NO_CONSTRAINT);
    expect(resource.volume).toBeUndefined();
  });
  test('Infers from opus.Encoder', () => {
    const resource = createAudioResource(new opus.Encoder(), { inlineVolume: true });
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Opus, VOLUME_CONSTRAINT);
    expect(resource.volume).toBeInstanceOf(VolumeTransformer);
    expect(resource.encoder).toBeInstanceOf(opus.Encoder);
  });
  test('Infers from opus.OggDemuxer', () => {
    const resource = createAudioResource(new opus.OggDemuxer());
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Opus, NO_CONSTRAINT);
    expect(resource.volume).toBeUndefined();
    expect(resource.encoder).toBeUndefined();
  });
  test('Infers from opus.WebmDemuxer', () => {
    const resource = createAudioResource(new opus.WebmDemuxer());
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Opus, NO_CONSTRAINT);
    expect(resource.volume).toBeUndefined();
  });
  test('Infers from opus.Decoder', () => {
    const resource = createAudioResource(new opus.Decoder());
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Raw, NO_CONSTRAINT);
    expect(resource.volume).toBeUndefined();
  });
  test('Infers from VolumeTransformer', () => {
    // eslint-disable-next-line @typescript-eslint/no-unsafe-argument
    const stream = new VolumeTransformer({} as any);
    const resource = createAudioResource(stream, { inlineVolume: true });
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Raw, NO_CONSTRAINT);
    expect(resource.volume).toBe(stream);
  });
  test('Falls back to Arbitrary for unknown stream type', () => {
    const resource = createAudioResource(new PassThrough());
    expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, NO_CONSTRAINT);
    expect(resource.volume).toBeUndefined();
  });
  test('Appends silence frames when ended', async () => {
    const stream = Readable.from(Buffer.from([1]));
    const resource = new AudioResource([], [stream], null, 5);
    await started(resource);
    expect(resource.readable).toBe(true);
    expect(resource.read()).toEqual(Buffer.from([1]));
    // Once the stream ends, exactly 5 silence frames should follow before the resource closes
    for (let i = 0; i < 5; i++) {
      await wait();
      expect(resource.readable).toBe(true);
      expect(resource.read()).toBe(SILENCE_FRAME);
    }
    await wait();
    expect(resource.readable).toBe(false);
    expect(resource.read()).toBe(null);
  });
});

View File

@@ -0,0 +1,49 @@
import { Edge, findPipeline, StreamType, TransformerType } from '../TransformerGraph';
const noConstraint = () => true;
/**
 * Flattens a pipeline of edges into the ordered list of stream types it passes through.
 *
 * @param pipeline - The pipeline of edges returned by findPipeline(...)
 */
function reducePath(pipeline: Edge[]) {
  // The origin of every edge, followed by the destination of the final edge.
  const streams = pipeline.map((edge) => edge.from.type);
  streams.push(pipeline[pipeline.length - 1].to.type);
  return streams;
}
// Predicates for detecting the inline-volume transformer within a pipeline.
const isVolume = (edge: Edge) => edge.type === TransformerType.InlineVolume;
const containsVolume = (edges: Edge[]) => edges.some(isVolume);
// Without constraints, every input type should reach Opus via a cheapest route
// that never includes the inline-volume transformer.
describe('findPipeline (no constraints)', () => {
  test.each([StreamType.Arbitrary, StreamType.OggOpus, StreamType.WebmOpus, StreamType.Raw])(
    '%s maps to opus with no inline volume',
    (type) => {
      const pipeline = findPipeline(type, noConstraint);
      const path = reducePath(pipeline);
      expect(path.length).toBeGreaterThanOrEqual(2);
      expect(path[0]).toBe(type);
      expect(path.pop()).toBe(StreamType.Opus);
      expect(pipeline.some(isVolume)).toBe(false);
    },
  );
  test('opus is unchanged', () => {
    // An Opus input is already playable, so the pipeline should be empty
    expect(findPipeline(StreamType.Opus, noConstraint)).toHaveLength(0);
  });
});
// With the volume constraint, every path must route through the inline-volume transformer.
describe('findPipeline (volume constraint)', () => {
  test.each(Object.values(StreamType))('%s maps to opus with inline volume', (type) => {
    const pipeline = findPipeline(type, containsVolume);
    const path = reducePath(pipeline);
    expect(path.length).toBeGreaterThanOrEqual(2);
    expect(path[0]).toBe(type);
    expect(path.pop()).toBe(StreamType.Opus);
    expect(pipeline.some(isVolume)).toBe(true);
  });
});

View File

@@ -0,0 +1,21 @@
export {
AudioPlayer,
AudioPlayerStatus,
AudioPlayerState,
NoSubscriberBehavior,
createAudioPlayer,
AudioPlayerBufferingState,
AudioPlayerIdleState,
AudioPlayerPausedState,
AudioPlayerPlayingState,
CreateAudioPlayerOptions,
AudioPlayerEvents,
} from './AudioPlayer';
export { AudioPlayerError } from './AudioPlayerError';
export { AudioResource, CreateAudioResourceOptions, createAudioResource } from './AudioResource';
export { PlayerSubscription } from './PlayerSubscription';
export { StreamType } from './TransformerGraph';