chore: monorepo setup (#7175)

This commit is contained in:
Noel
2022-01-07 17:18:25 +01:00
committed by GitHub
parent 780b7ed39f
commit 16390efe6e
504 changed files with 25459 additions and 22830 deletions

View File

@@ -0,0 +1,2 @@
*.d.ts
examples

View File

@@ -0,0 +1,16 @@
{
"root": true,
"extends": "marine/prettier/node",
"parserOptions": {
"project": "./tsconfig.eslint.json",
"extraFileExtensions": [".mjs"]
},
"ignorePatterns": ["**/dist/*"],
"env": {
"jest": true
},
"rules": {
"no-redeclare": 0,
"@typescript-eslint/naming-convention": 0
}
}

23
packages/voice/.gitignore vendored Normal file
View File

@@ -0,0 +1,23 @@
# Packages
node_modules/
# Log files
logs/
*.log
npm-debug.log*
# Runtime data
pids
*.pid
*.seed
# Dist
dist/
typings/
docs/**/*
!docs/index.yml
!docs/README.md
# Miscellaneous
.tmp/
coverage/

View File

@@ -0,0 +1,8 @@
{
"printWidth": 120,
"useTabs": true,
"singleQuote": true,
"quoteProps": "as-needed",
"trailingComma": "all",
"endOfLine": "lf"
}

View File

@@ -0,0 +1,3 @@
{
"releaseCommitMessageFormat": "chore(Release): publish"
}

View File

@@ -0,0 +1,61 @@
# Changelog
All notable changes to this project will be documented in this file.
## [0.7.5](https://github.com/discordjs/voice/compare/v0.7.4...v0.7.5) (2021-11-12)
### Bug Fixes
* postbuild script ([644af95](https://github.com/discordjs/voice/commit/644af9579f02724c489514f482640b8413d2c305))
## [0.7.4](https://github.com/discordjs/voice/compare/v0.7.3...v0.7.4) (2021-11-12)
### Bug Fixes
* conditionally apply banner only to esm build ([8c4e8c4](https://github.com/discordjs/voice/commit/8c4e8c4ba5b9013a90de0238a7f2771e9113a62d))
## [0.7.3](https://github.com/discordjs/voice/compare/v0.7.2...v0.7.3) (2021-11-11)
### Bug Fixes
* **esm:** resolve esm imports ([#229](https://github.com/discordjs/voice/issues/229)) ([616f2bc](https://github.com/discordjs/voice/commit/616f2bcfde47e55ac7b09f4faaa07f15d78c11a5))
## [0.7.2](https://github.com/discordjs/voice/compare/v0.7.1...v0.7.2) (2021-10-30)
### Bug Fixes
* prism imports for esm ([0bfd6d5](https://github.com/discordjs/voice/commit/0bfd6d5247f89cfc125e7645e9fb7ebfed94bb2f))
## [0.7.1](https://github.com/discordjs/voice/compare/v0.7.0...v0.7.1) (2021-10-30)
### Bug Fixes
* prism imports for esm ([9222dbf](https://github.com/discordjs/voice/commit/9222dbfedd8bfaeb679133dfa41330ea75a03a70))
# [0.7.0](https://github.com/discordjs/voice/compare/v0.6.0...v0.7.0) (2021-10-30)
### Features
* export some types so they render in docs ([#211](https://github.com/discordjs/voice/issues/211)) ([a6dad47](https://github.com/discordjs/voice/commit/a6dad4781fb479d22d7bff99888e42368d6d6411))
# Changelog
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.

190
packages/voice/LICENSE Normal file
View File

@@ -0,0 +1,190 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2020-2021 Amish Shah
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

82
packages/voice/README.md Normal file
View File

@@ -0,0 +1,82 @@
<div align="center">
<br />
<p>
<a href="https://discord.js.org"><img src="https://discord.js.org/static/logo.svg" width="546" alt="discord.js" /></a>
</p>
<br />
<p>
<a href="https://discord.gg/djs"><img src="https://img.shields.io/discord/222078108977594368?color=5865F2&logo=discord&logoColor=white" alt="Discord server" /></a>
<a href="https://www.npmjs.com/package/@discordjs/voice"><img src="https://img.shields.io/npm/v/@discordjs/voice.svg?maxAge=3600" alt="npm version" /></a>
<a href="https://www.npmjs.com/package/@discordjs/voice"><img src="https://img.shields.io/npm/dt/@discordjs/voice.svg?maxAge=3600" alt="npm downloads" /></a>
<a href="https://github.com/discordjs/voice/actions"><img src="https://github.com/discordjs/voice/workflows/Tests/badge.svg" alt="Build status" /></a>
<a href="https://codecov.io/gh/discordjs/voice"><img src="https://codecov.io/gh/discordjs/voice/branch/main/graph/badge.svg" alt="Code coverage" /></a>
<a href="https://www.patreon.com/discordjs"><img src="https://img.shields.io/badge/donate-patreon-F96854.svg" alt="Patreon" /></a>
</p>
</div>
## About
An implementation of the Discord Voice API for Node.js, written in TypeScript.
**Features:**
- Send and receive\* audio in Discord voice-based channels
- A strong focus on reliability and predictable behaviour
- Horizontal scalability and libraries other than [discord.js](https://discord.js.org/) are supported with custom adapters
- A robust audio processing system that can handle a wide range of audio sources
\*_Audio receive is not documented by Discord so stable support is not guaranteed_
## Installation
**Node.js 16.0.0 or newer is required.**
```sh-session
npm install @discordjs/voice
yarn add @discordjs/voice
pnpm add @discordjs/voice
```
## Dependencies
This library has several optional dependencies to support a variety
of different platforms. Install one dependency from each of the
categories shown below. The dependencies are listed in order of
preference for performance. If you can't install one of the options,
try installing another.
**Encryption Libraries (npm install):**
- `sodium`: ^3.0.2
- `tweetnacl`: ^1.0.3
- `libsodium-wrappers`: ^0.7.9
**Opus Libraries (npm install):**
- `@discordjs/opus`: ^0.4.0
- `opusscript`: ^0.0.7
**FFmpeg:**
- [`FFmpeg`](https://ffmpeg.org/) (installed and added to environment)
- `ffmpeg-static`: ^4.2.7 (npm install)
## Links
- [Website](https://discord.js.org/) ([source](https://github.com/discordjs/website))
- [Documentation](https://discord.js.org/#/docs/voice)
- [Examples](https://github.com/discordjs/voice/tree/main/examples)
- [discord.js Discord server](https://discord.gg/djs)
- [GitHub](https://github.com/discordjs/voice)
- [npm](https://www.npmjs.com/package/@discordjs/voice)
## Contributing
Before creating an issue, please ensure that it hasn't already been reported/suggested, and double-check the
[documentation](https://discord.js.org/#/docs/voice).
See [the contribution guide](https://github.com/discordjs/voice/blob/main/.github/CONTRIBUTING.md) if you'd like to submit a PR.
## Help
If you don't understand something in the documentation, you are experiencing problems, or you just need a gentle
nudge in the right direction, please don't hesitate to join our official [discord.js Server](https://discord.gg/djs).

View File

@@ -0,0 +1 @@
// Jest module shim: re-exports mock-socket's WebSocket implementation as the
// default export so tests that import a WebSocket class receive a mock instead
// of opening real sockets. Presumably wired up via jest moduleNameMapper — confirm in jest config.
export { WebSocket as default } from 'mock-socket';

View File

@@ -0,0 +1,100 @@
/* eslint-disable @typescript-eslint/dot-notation */
import { GatewayOpcodes } from 'discord-api-types/v9';
import * as DataStore from '../src/DataStore';
import * as _AudioPlayer from '../src/audio/AudioPlayer';
import { VoiceConnection } from '../src/VoiceConnection';
jest.mock('../src/VoiceConnection');
jest.mock('../src/audio/AudioPlayer');
const AudioPlayer = _AudioPlayer as unknown as jest.Mocked<typeof _AudioPlayer>;
/**
 * Builds a minimal stand-in for a VoiceConnection: an object whose joinConfig
 * merges the provided group/guildId over fixed channelId/selfMute/selfDeaf
 * defaults. Cast to `any` since only joinConfig is inspected by these tests.
 */
function createVoiceConnection(joinConfig: Pick<DataStore.JoinConfig, 'group' | 'guildId'>): VoiceConnection {
	const mergedJoinConfig = {
		channelId: '123',
		selfMute: false,
		selfDeaf: true,
		...joinConfig,
	};
	return { joinConfig: mergedJoinConfig } as any;
}
/**
 * Resolves on the next setImmediate tick, letting any already-queued
 * event-loop work (e.g. the audio cycle) run before assertions are made.
 */
function waitForEventLoop(): Promise<unknown> {
	return new Promise((resolve) => setImmediate(resolve));
}
// Reset the DataStore's shared group registry before every test so connections
// tracked in one test cannot leak into the next; re-seed the 'default' group
// that group-less lookups fall back to.
beforeEach(() => {
const groups = DataStore.getGroups();
for (const groupKey of groups.keys()) {
groups.delete(groupKey);
}
groups.set('default', new Map());
});
// Two fake connections sharing guildId '123' but living in different groups,
// used to verify that tracking/lookup is group-aware.
const voiceConnectionDefault = createVoiceConnection({ guildId: '123', group: 'default' });
const voiceConnectionAbc = createVoiceConnection({ guildId: '123', group: 'abc' });
// Unit tests for the DataStore module: gateway join payload creation,
// group-aware connection tracking, and the audio cycle that steps players.
describe('DataStore', () => {
// createJoinVoiceChannelPayload must map the camelCase JoinConfig onto the
// snake_case VoiceStateUpdate gateway payload.
test('VoiceConnection join payload creation', () => {
const joinConfig: DataStore.JoinConfig = {
guildId: '123',
channelId: '123',
selfDeaf: true,
selfMute: false,
group: 'default',
};
expect(DataStore.createJoinVoiceChannelPayload(joinConfig)).toStrictEqual({
op: GatewayOpcodes.VoiceStateUpdate,
d: {
guild_id: joinConfig.guildId,
channel_id: joinConfig.channelId,
self_deaf: joinConfig.selfDeaf,
self_mute: joinConfig.selfMute,
},
});
});
// Connections with the same guildId in different groups must be tracked and
// retrieved independently; lookups without a group use the 'default' group.
test('VoiceConnection management respects group', () => {
DataStore.trackVoiceConnection(voiceConnectionDefault);
DataStore.trackVoiceConnection(voiceConnectionAbc);
expect(DataStore.getVoiceConnection('123')).toBe(voiceConnectionDefault);
expect(DataStore.getVoiceConnection('123', 'default')).toBe(voiceConnectionDefault);
expect(DataStore.getVoiceConnection('123', 'abc')).toBe(voiceConnectionAbc);
expect([...DataStore.getGroups().keys()]).toEqual(['default', 'abc']);
expect([...DataStore.getVoiceConnections().values()]).toEqual([voiceConnectionDefault]);
expect([...DataStore.getVoiceConnections('default').values()]).toEqual([voiceConnectionDefault]);
expect([...DataStore.getVoiceConnections('abc').values()]).toEqual([voiceConnectionAbc]);
// Untracking removes only the connection in its own group.
DataStore.untrackVoiceConnection(voiceConnectionDefault);
expect(DataStore.getVoiceConnection('123')).toBeUndefined();
expect(DataStore.getVoiceConnection('123', 'abc')).toBe(voiceConnectionAbc);
});
// add/has/delete lifecycle: re-adding is idempotent, and a deleted player
// must not have its step methods invoked by the audio cycle.
test('Managing Audio Players', async () => {
const player = DataStore.addAudioPlayer(new AudioPlayer.AudioPlayer());
const dispatchSpy = jest.spyOn(player as any, '_stepDispatch');
const prepareSpy = jest.spyOn(player as any, '_stepPrepare');
expect(DataStore.hasAudioPlayer(player)).toBe(true);
expect(DataStore.addAudioPlayer(player)).toBe(player);
DataStore.deleteAudioPlayer(player);
expect(DataStore.deleteAudioPlayer(player)).toBe(undefined);
expect(DataStore.hasAudioPlayer(player)).toBe(false);
// Tests audio cycle with nextTime === -1
await waitForEventLoop();
expect(dispatchSpy).toHaveBeenCalledTimes(0);
expect(prepareSpy).toHaveBeenCalledTimes(0);
});
// Only a player whose checkPlayable reports true (player2) gets stepped by
// the audio cycle; the untouched mock player (player3) must see zero calls.
test('Preparing Audio Frames', async () => {
// Test functional player
const player2 = DataStore.addAudioPlayer(new AudioPlayer.AudioPlayer());
player2['checkPlayable'] = jest.fn(() => true);
const player3 = DataStore.addAudioPlayer(new AudioPlayer.AudioPlayer());
const dispatchSpy2 = jest.spyOn(player2 as any, '_stepDispatch');
const prepareSpy2 = jest.spyOn(player2 as any, '_stepPrepare');
const dispatchSpy3 = jest.spyOn(player3 as any, '_stepDispatch');
const prepareSpy3 = jest.spyOn(player3 as any, '_stepPrepare');
await waitForEventLoop();
DataStore.deleteAudioPlayer(player2);
await waitForEventLoop();
DataStore.deleteAudioPlayer(player3);
expect(dispatchSpy2).toHaveBeenCalledTimes(1);
expect(prepareSpy2).toHaveBeenCalledTimes(1);
expect(dispatchSpy3).toHaveBeenCalledTimes(0);
expect(prepareSpy3).toHaveBeenCalledTimes(0);
});
});

View File

@@ -0,0 +1,769 @@
/* eslint-disable @typescript-eslint/no-unsafe-argument */
/* eslint-disable @typescript-eslint/dot-notation */
import {
createVoiceConnection,
VoiceConnection,
VoiceConnectionConnectingState,
VoiceConnectionDisconnectReason,
VoiceConnectionReadyState,
VoiceConnectionSignallingState,
VoiceConnectionStatus,
} from '../src/VoiceConnection';
import * as _DataStore from '../src/DataStore';
import * as _Networking from '../src/networking/Networking';
import * as _AudioPlayer from '../src/audio/AudioPlayer';
import { PlayerSubscription as _PlayerSubscription } from '../src/audio/PlayerSubscription';
import type { DiscordGatewayAdapterLibraryMethods } from '../src/util/adapter';
import EventEmitter from 'node:events';
// BUG FIX: jest.mock() must receive the exact module specifiers used by the
// imports above ('../src/...'); with the old '../audio/...' paths the real
// modules were loaded and the mocks never applied, so calls such as
// DataStore.createJoinVoiceChannelPayload.mockReset() would fail.
jest.mock('../src/audio/AudioPlayer');
jest.mock('../src/audio/PlayerSubscription');
jest.mock('../src/DataStore');
jest.mock('../src/networking/Networking');
// Re-typed views of the auto-mocked modules so tests can use the jest mock APIs
// (mockImplementation, mockReset, call assertions) with correct typings.
const DataStore = _DataStore as unknown as jest.Mocked<typeof _DataStore>;
const Networking = _Networking as unknown as jest.Mocked<typeof _Networking>;
const AudioPlayer = _AudioPlayer as unknown as jest.Mocked<typeof _AudioPlayer>;
const PlayerSubscription = _PlayerSubscription as unknown as jest.Mock<_PlayerSubscription>;
// Stub the Networking constructor: each instance is a bare object with an
// empty state, enough for VoiceConnection to attach listeners and read state.
Networking.Networking.mockImplementation(function mockedConstructor() {
this.state = {};
return this;
});
/**
 * Creates a fake gateway adapter for VoiceConnection tests.
 *
 * Returns jest-backed `sendPayload` (defaults to returning true) and `destroy`
 * functions, a `libMethods` object that captures whatever methods the voice
 * library registers, and the `creator` function to pass as `adapterCreator`.
 */
function createFakeAdapter() {
	const destroy = jest.fn();
	const sendPayload = jest.fn().mockReturnValue(true);
	const libMethods: Partial<DiscordGatewayAdapterLibraryMethods> = {};
	const creator = jest.fn((methods) => {
		// Capture the library-side methods so tests can invoke them later.
		Object.assign(libMethods, methods);
		return { sendPayload, destroy };
	});
	return { sendPayload, destroy, libMethods, creator };
}
/**
 * Returns a fresh copy of the fixed join configuration shared by every test
 * in this file (channel '1' in guild '2', self-deafened, in group 'default').
 */
function createJoinConfig() {
	const channelId = '1';
	const guildId = '2';
	return {
		channelId,
		guildId,
		selfDeaf: true,
		selfMute: false,
		group: 'default',
	};
}
/**
 * Convenience factory: constructs a real VoiceConnection wired to a fake
 * adapter and the standard test joinConfig, returning all three so tests can
 * assert against the adapter's jest mocks.
 */
function createFakeVoiceConnection() {
	const joinConfig = createJoinConfig();
	const adapter = createFakeAdapter();
	const voiceConnection = new VoiceConnection(joinConfig, {
		debug: false,
		adapterCreator: adapter.creator,
	});
	return { adapter, joinConfig, voiceConnection };
}
// Clear recorded calls and stubbed implementations on the mocked DataStore
// functions so every test starts from pristine mock state.
beforeEach(() => {
DataStore.createJoinVoiceChannelPayload.mockReset();
DataStore.getVoiceConnection.mockReset();
DataStore.trackVoiceConnection.mockReset();
DataStore.untrackVoiceConnection.mockReset();
});
// Tests for the createVoiceConnection factory: creating new connections,
// reusing existing ones per guild, and handling adapter send failures.
describe('createVoiceConnection', () => {
// A brand-new connection is tracked, starts in Signalling, and sends the
// join payload through its adapter.
test('New voice connection', () => {
const mockPayload = Symbol('mock') as any;
DataStore.createJoinVoiceChannelPayload.mockImplementation(() => mockPayload);
const adapter = createFakeAdapter();
const joinConfig = createJoinConfig();
const voiceConnection = createVoiceConnection(joinConfig, {
debug: false,
adapterCreator: adapter.creator,
});
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
expect(DataStore.getVoiceConnection).toHaveBeenCalledTimes(1);
expect(DataStore.trackVoiceConnection).toHaveBeenCalledWith(voiceConnection);
expect(DataStore.untrackVoiceConnection).not.toHaveBeenCalled();
expect(adapter.sendPayload).toHaveBeenCalledWith(mockPayload);
});
// If the adapter cannot send the join payload, the new connection ends up
// Disconnected instead of Signalling.
test('New voice connection with adapter failure', () => {
const mockPayload = Symbol('mock') as any;
DataStore.createJoinVoiceChannelPayload.mockImplementation(() => mockPayload);
const adapter = createFakeAdapter();
adapter.sendPayload.mockReturnValue(false);
const joinConfig = createJoinConfig();
const voiceConnection = createVoiceConnection(joinConfig, {
debug: false,
adapterCreator: adapter.creator,
});
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Disconnected);
expect(DataStore.getVoiceConnection).toHaveBeenCalledTimes(1);
expect(DataStore.trackVoiceConnection).toHaveBeenCalledWith(voiceConnection);
expect(DataStore.untrackVoiceConnection).not.toHaveBeenCalled();
expect(adapter.sendPayload).toHaveBeenCalledWith(mockPayload);
});
// When a connection already exists for the guild, the factory returns it
// unchanged (no new adapter, no state transition) and resends the payload
// through the existing adapter.
test('Reconfiguring existing connection', () => {
const mockPayload = Symbol('mock') as any;
DataStore.createJoinVoiceChannelPayload.mockImplementation(() => mockPayload);
const existingAdapter = createFakeAdapter();
const existingJoinConfig = createJoinConfig();
const existingVoiceConnection = new VoiceConnection(existingJoinConfig, {
debug: false,
adapterCreator: existingAdapter.creator,
});
const stateSetter = jest.spyOn(existingVoiceConnection, 'state', 'set');
DataStore.getVoiceConnection.mockImplementation((guildId) =>
guildId === existingJoinConfig.guildId ? existingVoiceConnection : null,
);
const newAdapter = createFakeAdapter();
const newJoinConfig = createJoinConfig();
const newVoiceConnection = createVoiceConnection(newJoinConfig, {
debug: false,
adapterCreator: newAdapter.creator,
});
expect(DataStore.getVoiceConnection).toHaveBeenCalledWith(newJoinConfig.guildId);
expect(DataStore.trackVoiceConnection).not.toHaveBeenCalled();
expect(DataStore.untrackVoiceConnection).not.toHaveBeenCalled();
expect(newAdapter.creator).not.toHaveBeenCalled();
expect(existingAdapter.sendPayload).toHaveBeenCalledWith(mockPayload);
expect(newVoiceConnection).toBe(existingVoiceConnection);
expect(stateSetter).not.toHaveBeenCalled();
});
// An existing connection in the Disconnected state is revived via rejoin()
// with the channel-level parts of the new join config (guildId/group dropped).
test('Calls rejoin() on existing disconnected connection', () => {
const mockPayload = Symbol('mock') as any;
DataStore.createJoinVoiceChannelPayload.mockImplementation(() => mockPayload);
const existingAdapter = createFakeAdapter();
const existingJoinConfig = createJoinConfig();
const existingVoiceConnection = new VoiceConnection(existingJoinConfig, {
debug: false,
adapterCreator: existingAdapter.creator,
});
existingVoiceConnection.state = {
status: VoiceConnectionStatus.Disconnected,
adapter: existingAdapter,
reason: VoiceConnectionDisconnectReason.EndpointRemoved,
};
const rejoinSpy = jest.spyOn(existingVoiceConnection, 'rejoin');
DataStore.getVoiceConnection.mockImplementation((guildId) =>
guildId === existingJoinConfig.guildId ? existingVoiceConnection : null,
);
const newAdapter = createFakeAdapter();
const newJoinConfig = createJoinConfig();
const { guildId, group, ...rejoinConfig } = newJoinConfig;
const newVoiceConnection = createVoiceConnection(newJoinConfig, {
debug: false,
adapterCreator: newAdapter.creator,
});
expect(DataStore.getVoiceConnection).toHaveBeenCalledWith(newJoinConfig.guildId);
expect(DataStore.trackVoiceConnection).not.toHaveBeenCalled();
expect(DataStore.untrackVoiceConnection).not.toHaveBeenCalled();
expect(newAdapter.creator).not.toHaveBeenCalled();
expect(rejoinSpy).toHaveBeenCalledWith(rejoinConfig);
expect(newVoiceConnection).toBe(existingVoiceConnection);
});
// Reusing an existing connection whose adapter fails to send leaves it in
// the Disconnected state.
test('Reconfiguring existing connection with adapter failure', () => {
const mockPayload = Symbol('mock') as any;
DataStore.createJoinVoiceChannelPayload.mockImplementation(() => mockPayload);
const existingAdapter = createFakeAdapter();
const existingJoinConfig = createJoinConfig();
const existingVoiceConnection = new VoiceConnection(existingJoinConfig, {
debug: false,
adapterCreator: existingAdapter.creator,
});
DataStore.getVoiceConnection.mockImplementation((guildId) =>
guildId === existingJoinConfig.guildId ? existingVoiceConnection : null,
);
const newAdapter = createFakeAdapter();
const newJoinConfig = createJoinConfig();
existingAdapter.sendPayload.mockReturnValue(false);
const newVoiceConnection = createVoiceConnection(newJoinConfig, {
debug: false,
adapterCreator: newAdapter.creator,
});
expect(DataStore.getVoiceConnection).toHaveBeenCalledWith(newJoinConfig.guildId);
expect(DataStore.trackVoiceConnection).not.toHaveBeenCalled();
expect(DataStore.untrackVoiceConnection).not.toHaveBeenCalled();
expect(newAdapter.creator).not.toHaveBeenCalled();
expect(existingAdapter.sendPayload).toHaveBeenCalledWith(mockPayload);
expect(newVoiceConnection).toBe(existingVoiceConnection);
expect(newVoiceConnection.state.status).toBe(VoiceConnectionStatus.Disconnected);
});
});
// Tests for the private addServerPacket handler (VOICE_SERVER_UPDATE): the
// packet is always stored; networking is configured only for a usable endpoint.
describe('VoiceConnection#addServerPacket', () => {
test('Stores the packet and attempts to configure networking', () => {
const { voiceConnection } = createFakeVoiceConnection();
voiceConnection.configureNetworking = jest.fn();
const dummy = {
endpoint: 'discord.com',
guild_id: 123,
token: 'abc',
} as any;
voiceConnection['addServerPacket'](dummy);
expect(voiceConnection['packets'].server).toBe(dummy);
expect(voiceConnection.configureNetworking).toHaveBeenCalled();
});
// A newer packet replaces any previously stored server packet.
test('Overwrites existing packet', () => {
const { voiceConnection } = createFakeVoiceConnection();
voiceConnection['packets'].server = Symbol('old') as any;
voiceConnection.configureNetworking = jest.fn();
const dummy = {
endpoint: 'discord.com',
guild_id: 123,
token: 'abc',
} as any;
voiceConnection['addServerPacket'](dummy);
expect(voiceConnection['packets'].server).toBe(dummy);
expect(voiceConnection.configureNetworking).toHaveBeenCalled();
});
// A null endpoint means the voice server went away: store the packet, skip
// networking configuration, and move to the Disconnected state.
test('Disconnects when given a null endpoint', () => {
const { voiceConnection } = createFakeVoiceConnection();
voiceConnection['packets'].server = Symbol('old') as any;
voiceConnection.configureNetworking = jest.fn();
const dummy = {
endpoint: null,
guild_id: 123,
token: 'abc',
} as any;
voiceConnection['addServerPacket'](dummy);
expect(voiceConnection['packets'].server).toBe(dummy);
expect(voiceConnection.configureNetworking).not.toHaveBeenCalled();
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Disconnected);
});
});
// Tests for the private addStatePacket handler (VOICE_STATE_UPDATE): fields
// present on the packet are copied onto joinConfig; absent fields are kept.
describe('VoiceConnection#addStatePacket', () => {
test('State is assigned to joinConfig', () => {
const { voiceConnection } = createFakeVoiceConnection();
voiceConnection['addStatePacket']({
self_deaf: true,
self_mute: true,
channel_id: '123',
} as any);
expect(voiceConnection.joinConfig).toMatchObject({
selfDeaf: true,
selfMute: true,
channelId: '123',
});
// A partial packet updates only the provided field (self_mute here);
// selfDeaf and channelId retain their previous values.
voiceConnection['addStatePacket']({
self_mute: false,
} as any);
expect(voiceConnection.joinConfig).toMatchObject({
selfDeaf: true,
selfMute: false,
channelId: '123',
});
});
});
// Tests for configureNetworking: a Networking instance is constructed only
// when both the state and server packets are present and the connection is
// not destroyed; on success the connection transitions to Connecting.
describe('VoiceConnection#configureNetworking', () => {
test('Only creates Networking instance when both packets are present and not destroyed', () => {
const { voiceConnection } = createFakeVoiceConnection();
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
// No packets at all: remains Signalling, nothing constructed.
voiceConnection.configureNetworking();
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
const adapter = (voiceConnection.state as VoiceConnectionSignallingState).adapter;
const state = {
session_id: 'abc',
user_id: '123',
} as any;
const server = {
endpoint: 'def',
guild_id: '123',
token: 'xyz',
} as any;
// Only the state packet: still no Networking instance.
Object.assign(voiceConnection['packets'], { state, server: undefined });
voiceConnection.configureNetworking();
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
expect(Networking.Networking).toHaveBeenCalledTimes(0);
// Only the server packet: still no Networking instance.
Object.assign(voiceConnection['packets'], { state: undefined, server });
voiceConnection.configureNetworking();
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
expect(Networking.Networking).toHaveBeenCalledTimes(0);
// Both packets present but the connection is Destroyed: no-op.
Object.assign(voiceConnection['packets'], { state, server });
voiceConnection.state = { status: VoiceConnectionStatus.Destroyed };
voiceConnection.configureNetworking();
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Destroyed);
expect(Networking.Networking).toHaveBeenCalledTimes(0);
// Both packets present and in Signalling: Networking is constructed with
// the combined packet data and the connection enters Connecting.
voiceConnection.state = { status: VoiceConnectionStatus.Signalling, adapter };
voiceConnection.configureNetworking();
expect(Networking.Networking).toHaveBeenCalledTimes(1);
expect(Networking.Networking).toHaveBeenCalledWith(
{
endpoint: server.endpoint,
serverId: server.guild_id,
token: server.token,
sessionId: state.session_id,
userId: state.user_id,
},
false,
);
expect(voiceConnection.state).toMatchObject({
status: VoiceConnectionStatus.Connecting,
adapter,
});
expect((voiceConnection.state as unknown as VoiceConnectionConnectingState).networking).toBeInstanceOf(
Networking.Networking,
);
});
});
// Tests for the private onNetworkingClose handler: destroyed connections
// ignore closes, close code 4014 disconnects, and any other code triggers a
// rejoin attempt via the adapter.
describe('VoiceConnection#onNetworkingClose', () => {
test('Does nothing in destroyed state', () => {
const { voiceConnection, adapter } = createFakeVoiceConnection();
voiceConnection.state = {
status: VoiceConnectionStatus.Destroyed,
};
voiceConnection['onNetworkingClose'](1000);
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Destroyed);
expect(adapter.sendPayload).not.toHaveBeenCalled();
});
// 4014 moves the connection to Disconnected, recording the close code,
// without attempting a rejoin.
test('Disconnects for code 4014', () => {
const { voiceConnection, adapter } = createFakeVoiceConnection();
voiceConnection['onNetworkingClose'](4014);
expect(voiceConnection.state).toMatchObject({
status: VoiceConnectionStatus.Disconnected,
closeCode: 4014,
});
expect(adapter.sendPayload).not.toHaveBeenCalled();
});
// Other close codes: the connection resends its join payload (back to
// Signalling) and increments rejoinAttempts.
test('Attempts rejoin for codes != 4014', () => {
const dummyPayload = Symbol('dummy') as any;
const { voiceConnection, adapter, joinConfig } = createFakeVoiceConnection();
DataStore.createJoinVoiceChannelPayload.mockImplementation((config) =>
config === joinConfig ? dummyPayload : undefined,
);
voiceConnection['onNetworkingClose'](1234);
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
expect(adapter.sendPayload).toHaveBeenCalledWith(dummyPayload);
expect(voiceConnection.rejoinAttempts).toBe(1);
});
// Same as above, but the adapter fails to send: the rejoin attempt is still
// counted, yet the connection ends up Disconnected.
test('Attempts rejoin for codes != 4014 (with adapter failure)', () => {
const dummyPayload = Symbol('dummy') as any;
const { voiceConnection, adapter, joinConfig } = createFakeVoiceConnection();
DataStore.createJoinVoiceChannelPayload.mockImplementation((config) =>
config === joinConfig ? dummyPayload : undefined,
);
adapter.sendPayload.mockReturnValue(false);
voiceConnection['onNetworkingClose'](1234);
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Disconnected);
expect(adapter.sendPayload).toHaveBeenCalledWith(dummyPayload);
expect(voiceConnection.rejoinAttempts).toBe(1);
});
});
describe('VoiceConnection#onNetworkingStateChange', () => {
	// No state transition should occur when the networking status code is unchanged.
	test('Does nothing when status code identical', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		const stateSetter = jest.spyOn(voiceConnection, 'state', 'set');
		voiceConnection['onNetworkingStateChange'](
			{ code: _Networking.NetworkingStatusCode.Ready } as any,
			{ code: _Networking.NetworkingStatusCode.Ready } as any,
		);
		voiceConnection['onNetworkingStateChange'](
			{ code: _Networking.NetworkingStatusCode.Closed } as any,
			{ code: _Networking.NetworkingStatusCode.Closed } as any,
		);
		expect(stateSetter).not.toHaveBeenCalled();
	});
	// Networking transitions are only acted upon while in Ready or Connecting.
	test('Does nothing when not in Ready or Connecting states', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		const stateSetter = jest.spyOn(voiceConnection, 'state', 'set');
		// A Ready -> Closed pair that would normally trigger an update.
		const call = [
			{ code: _Networking.NetworkingStatusCode.Ready } as any,
			{ code: _Networking.NetworkingStatusCode.Closed } as any,
		];
		// Write '_state' directly so the spied-on setter is not invoked by setup.
		voiceConnection['_state'] = { status: VoiceConnectionStatus.Signalling } as any;
		voiceConnection['onNetworkingStateChange'](call[0], call[1]);
		voiceConnection['_state'] = { status: VoiceConnectionStatus.Disconnected } as any;
		voiceConnection['onNetworkingStateChange'](call[0], call[1]);
		voiceConnection['_state'] = { status: VoiceConnectionStatus.Destroyed } as any;
		voiceConnection['onNetworkingStateChange'](call[0], call[1]);
		expect(stateSetter).not.toHaveBeenCalled();
	});
	// Networking entering Ready promotes the voice connection to Ready.
	test('Transitions to Ready', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		const stateSetter = jest.spyOn(voiceConnection, 'state', 'set');
		voiceConnection['_state'] = {
			...(voiceConnection.state as VoiceConnectionSignallingState),
			status: VoiceConnectionStatus.Connecting,
			networking: new Networking.Networking({} as any, false),
		};
		voiceConnection['onNetworkingStateChange'](
			{ code: _Networking.NetworkingStatusCode.Closed } as any,
			{ code: _Networking.NetworkingStatusCode.Ready } as any,
		);
		expect(stateSetter).toHaveBeenCalledTimes(1);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Ready);
	});
	// Networking leaving Ready (here: re-identifying) demotes the connection to Connecting.
	test('Transitions to Connecting', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		const stateSetter = jest.spyOn(voiceConnection, 'state', 'set');
		voiceConnection['_state'] = {
			...(voiceConnection.state as VoiceConnectionSignallingState),
			status: VoiceConnectionStatus.Connecting,
			networking: new Networking.Networking({} as any, false),
		};
		voiceConnection['onNetworkingStateChange'](
			{ code: _Networking.NetworkingStatusCode.Ready } as any,
			{ code: _Networking.NetworkingStatusCode.Identifying } as any,
		);
		expect(stateSetter).toHaveBeenCalledTimes(1);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Connecting);
	});
});
describe('VoiceConnection#destroy', () => {
	// Destroying an already-destroyed connection is an error.
	test('Throws when in Destroyed state', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		voiceConnection.state = { status: VoiceConnectionStatus.Destroyed };
		expect(() => {
			voiceConnection.destroy();
		}).toThrow();
	});
	// A destroyable connection untracks itself from the DataStore, asks the
	// gateway to leave the channel, and ends up in the Destroyed state.
	test('Cleans up in a valid, destroyable state', () => {
		const { voiceConnection, joinConfig, adapter } = createFakeVoiceConnection();
		DataStore.getVoiceConnection.mockImplementation((id) =>
			id === joinConfig.guildId ? voiceConnection : undefined,
		);
		const leavePayload = Symbol('dummy');
		DataStore.createJoinVoiceChannelPayload.mockReturnValue(leavePayload as any);
		voiceConnection.destroy();
		expect(DataStore.getVoiceConnection).toHaveReturnedWith(voiceConnection);
		expect(DataStore.untrackVoiceConnection).toHaveBeenCalledWith(voiceConnection);
		// The leave payload must target the same guild, with the channel cleared.
		expect(DataStore.createJoinVoiceChannelPayload.mock.calls[0][0]).toMatchObject({
			channelId: null,
			guildId: joinConfig.guildId,
		});
		expect(adapter.sendPayload).toHaveBeenCalledWith(leavePayload);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Destroyed);
	});
});
describe('VoiceConnection#disconnect', () => {
	// disconnect() is only meaningful once connected; Destroyed and Signalling
	// states must reject it and remain unchanged.
	test('Fails in Destroyed and Signalling states', () => {
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		voiceConnection.state = { status: VoiceConnectionStatus.Destroyed };
		expect(voiceConnection.disconnect()).toBe(false);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Destroyed);
		voiceConnection.state = { status: VoiceConnectionStatus.Signalling, adapter };
		expect(voiceConnection.disconnect()).toBe(false);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
	});
	// Happy path: the adapter delivers the leave payload and the connection
	// records a manual disconnect.
	test('Disconnects - available adapter', () => {
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		voiceConnection.state = {
			status: VoiceConnectionStatus.Ready,
			adapter,
			networking: new Networking.Networking({} as any, false),
		};
		const leavePayload = Symbol('dummy');
		DataStore.createJoinVoiceChannelPayload.mockImplementation(() => leavePayload as any);
		expect(voiceConnection.disconnect()).toBe(true);
		// channelId is cleared on disconnect; guildId '2' presumably comes from
		// the fake join config built by createFakeVoiceConnection — confirm there.
		expect(voiceConnection.joinConfig).toMatchObject({
			channelId: null,
			guildId: '2',
			selfDeaf: true,
			selfMute: false,
		});
		expect(DataStore.createJoinVoiceChannelPayload).toHaveBeenCalledWith(voiceConnection.joinConfig);
		expect(adapter.sendPayload).toHaveBeenCalledWith(leavePayload);
		expect(voiceConnection.state).toMatchObject({
			status: VoiceConnectionStatus.Disconnected,
			reason: VoiceConnectionDisconnectReason.Manual,
		});
	});
	// If the adapter cannot deliver the payload, the disconnect reason reflects
	// the adapter being unavailable.
	test('Disconnects - unavailable adapter', () => {
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		voiceConnection.state = {
			status: VoiceConnectionStatus.Ready,
			adapter,
			networking: new Networking.Networking({} as any, false),
		};
		adapter.sendPayload.mockImplementation(() => false);
		expect(voiceConnection.disconnect()).toBe(false);
		expect(voiceConnection.state).toMatchObject({
			status: VoiceConnectionStatus.Disconnected,
			reason: VoiceConnectionDisconnectReason.AdapterUnavailable,
		});
	});
});
describe('VoiceConnection#rejoin', () => {
	// Rejoining from Disconnected increments the attempt counter and moves the
	// connection back to Signalling.
	test('Rejoins in a disconnected state', () => {
		const dummy = Symbol('dummy') as any;
		DataStore.createJoinVoiceChannelPayload.mockImplementation(() => dummy);
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		voiceConnection.state = {
			...(voiceConnection.state as VoiceConnectionSignallingState),
			status: VoiceConnectionStatus.Disconnected,
			reason: VoiceConnectionDisconnectReason.WebSocketClose,
			closeCode: 1000,
		};
		expect(voiceConnection.rejoin()).toBe(true);
		expect(voiceConnection.rejoinAttempts).toBe(1);
		expect(adapter.sendPayload).toHaveBeenCalledWith(dummy);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Signalling);
	});
	// Rejoining while Ready (e.g. moving channel) keeps the Ready state and does
	// not count as a reconnection attempt.
	test('Rejoins in a ready state', () => {
		const dummy = Symbol('dummy') as any;
		DataStore.createJoinVoiceChannelPayload.mockImplementation(() => dummy);
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		voiceConnection.state = {
			...(voiceConnection.state as VoiceConnectionReadyState),
			status: VoiceConnectionStatus.Ready,
		};
		expect(voiceConnection.rejoin()).toBe(true);
		expect(voiceConnection.rejoinAttempts).toBe(0);
		expect(adapter.sendPayload).toHaveBeenCalledWith(dummy);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Ready);
	});
	// Adapter failure keeps the connection Disconnected but still records the attempt.
	test('Stays in the disconnected state when the adapter fails', () => {
		const dummy = Symbol('dummy') as any;
		DataStore.createJoinVoiceChannelPayload.mockImplementation(() => dummy);
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		voiceConnection.state = {
			...(voiceConnection.state as VoiceConnectionSignallingState),
			status: VoiceConnectionStatus.Disconnected,
			reason: VoiceConnectionDisconnectReason.WebSocketClose,
			closeCode: 1000,
		};
		adapter.sendPayload.mockReturnValue(false);
		expect(voiceConnection.rejoin()).toBe(false);
		expect(voiceConnection.rejoinAttempts).toBe(1);
		expect(adapter.sendPayload).toHaveBeenCalledWith(dummy);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Disconnected);
	});
});
describe('VoiceConnection#subscribe', () => {
test('Does nothing in Destroyed state', () => {
const { voiceConnection } = createFakeVoiceConnection();
const player = new AudioPlayer.AudioPlayer();
player['subscribe'] = jest.fn();
voiceConnection.state = { status: VoiceConnectionStatus.Destroyed };
expect(voiceConnection.subscribe(player)).toBeUndefined();
expect(player['subscribe']).not.toHaveBeenCalled();
expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Destroyed);
});
test('Subscribes in a live state', () => {
const { voiceConnection } = createFakeVoiceConnection();
const adapter = (voiceConnection.state as VoiceConnectionSignallingState).adapter;
const player = new AudioPlayer.AudioPlayer();
const dummy = Symbol('dummy');
player['subscribe'] = jest.fn().mockImplementation(() => dummy);
expect(voiceConnection.subscribe(player)).toBe(dummy);
expect(player['subscribe']).toHaveBeenCalledWith(voiceConnection);
expect(voiceConnection.state).toMatchObject({
status: VoiceConnectionStatus.Signalling,
adapter,
});
});
});
describe('VoiceConnection#onSubscriptionRemoved', () => {
	// Removal callbacks are ignored once destroyed.
	test('Does nothing in Destroyed state', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		const subscription = new PlayerSubscription(voiceConnection, new AudioPlayer.AudioPlayer());
		subscription.unsubscribe = jest.fn();
		voiceConnection.state = { status: VoiceConnectionStatus.Destroyed };
		voiceConnection['onSubscriptionRemoved'](subscription);
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Destroyed);
		expect(subscription.unsubscribe).not.toHaveBeenCalled();
	});
	// Only the currently-stored subscription may be removed; a stale/foreign
	// subscription must not clear the stored one.
	test('Does nothing when subscription is not the same as the stored one', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		const subscription = new PlayerSubscription(voiceConnection, new AudioPlayer.AudioPlayer());
		subscription.unsubscribe = jest.fn();
		voiceConnection.state = { ...(voiceConnection.state as VoiceConnectionSignallingState), subscription };
		voiceConnection['onSubscriptionRemoved'](Symbol('new subscription') as any);
		expect(voiceConnection.state).toMatchObject({
			status: VoiceConnectionStatus.Signalling,
			subscription,
		});
		expect(subscription.unsubscribe).not.toHaveBeenCalled();
	});
	// Matching subscription: it is cleared from state and unsubscribed exactly once.
	test('Unsubscribes in a live state with matching subscription', () => {
		const { voiceConnection } = createFakeVoiceConnection();
		const subscription = new PlayerSubscription(voiceConnection, new AudioPlayer.AudioPlayer());
		subscription.unsubscribe = jest.fn();
		voiceConnection.state = { ...(voiceConnection.state as VoiceConnectionSignallingState), subscription };
		voiceConnection['onSubscriptionRemoved'](subscription);
		expect(voiceConnection.state).toEqual({
			...voiceConnection.state,
			subscription: undefined,
		});
		expect(subscription.unsubscribe).toHaveBeenCalledTimes(1);
	});
	// NOTE(review): this describe block is nested inside
	// 'VoiceConnection#onSubscriptionRemoved' but tests an unrelated private
	// method — presumably a misplaced closing brace; confirm intended placement.
	describe('updateReceiveBindings', () => {
		// Swapping networking states moves the 'message' listener from the old
		// UDP socket to the new one and repoints the receiver's connection data.
		test('Applies and removes udp listeners', () => {
			// Arrange
			const ws = new EventEmitter() as any;
			const oldNetworking = new Networking.Networking({} as any, false);
			oldNetworking.state = {
				code: _Networking.NetworkingStatusCode.Ready,
				connectionData: {} as any,
				connectionOptions: {} as any,
				udp: new EventEmitter() as any,
				ws,
			};
			const newNetworking = new Networking.Networking({} as any, false);
			newNetworking.state = {
				...oldNetworking.state,
				udp: new EventEmitter() as any,
			};
			const { voiceConnection } = createFakeVoiceConnection();
			// Act
			voiceConnection['updateReceiveBindings'](newNetworking.state, oldNetworking.state);
			// Assert
			expect(oldNetworking.state.udp.listenerCount('message')).toBe(0);
			expect(newNetworking.state.udp.listenerCount('message')).toBe(1);
			expect(voiceConnection.receiver.connectionData).toBe(newNetworking.state.connectionData);
		});
		// Same as above but for the 'packet' listener on the WebSocket.
		test('Applies and removes ws listeners', () => {
			// Arrange
			const udp = new EventEmitter() as any;
			const oldNetworking = new Networking.Networking({} as any, false);
			oldNetworking.state = {
				code: _Networking.NetworkingStatusCode.Ready,
				connectionData: {} as any,
				connectionOptions: {} as any,
				udp,
				ws: new EventEmitter() as any,
			};
			const newNetworking = new Networking.Networking({} as any, false);
			newNetworking.state = {
				...oldNetworking.state,
				ws: new EventEmitter() as any,
			};
			const { voiceConnection } = createFakeVoiceConnection();
			// Act
			voiceConnection['updateReceiveBindings'](newNetworking.state, oldNetworking.state);
			// Assert
			expect(oldNetworking.state.ws.listenerCount('packet')).toBe(0);
			expect(newNetworking.state.ws.listenerCount('packet')).toBe(1);
			expect(voiceConnection.receiver.connectionData).toBe(newNetworking.state.connectionData);
		});
		// With no previous state (undefined), listeners are simply attached.
		test('Applies initial listeners', () => {
			// Arrange
			const newNetworking = new Networking.Networking({} as any, false);
			newNetworking.state = {
				code: _Networking.NetworkingStatusCode.Ready,
				connectionData: {} as any,
				connectionOptions: {} as any,
				udp: new EventEmitter() as any,
				ws: new EventEmitter() as any,
			};
			const { voiceConnection } = createFakeVoiceConnection();
			// Act
			voiceConnection['updateReceiveBindings'](newNetworking.state, undefined);
			// Assert
			expect(newNetworking.state.ws.listenerCount('packet')).toBe(1);
			expect(newNetworking.state.udp.listenerCount('message')).toBe(1);
			expect(voiceConnection.receiver.connectionData).toBe(newNetworking.state.connectionData);
		});
	});
});
describe('Adapter', () => {
	// Server updates received through the adapter are routed to addServerPacket.
	test('onVoiceServerUpdate', () => {
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		const addServerPacket = jest.fn();
		voiceConnection['addServerPacket'] = addServerPacket;
		const payload = Symbol('dummy') as any;
		adapter.libMethods.onVoiceServerUpdate(payload);
		expect(addServerPacket).toHaveBeenCalledWith(payload);
	});
	// State updates received through the adapter are routed to addStatePacket.
	test('onVoiceStateUpdate', () => {
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		const addStatePacket = jest.fn();
		voiceConnection['addStatePacket'] = addStatePacket;
		const payload = Symbol('dummy') as any;
		adapter.libMethods.onVoiceStateUpdate(payload);
		expect(addStatePacket).toHaveBeenCalledWith(payload);
	});
	// destroy() via the adapter destroys the connection without sending a leave payload.
	test('destroy', () => {
		const { voiceConnection, adapter } = createFakeVoiceConnection();
		adapter.libMethods.destroy();
		expect(voiceConnection.state.status).toBe(VoiceConnectionStatus.Destroyed);
		expect(adapter.sendPayload).not.toHaveBeenCalled();
	});
});

View File

@@ -0,0 +1,42 @@
import { joinVoiceChannel } from '../src/joinVoiceChannel';
import * as VoiceConnection from '../src/VoiceConnection';
// Minimal fake adapter creator — the adapter itself is never exercised here.
const adapterCreator = () => ({ destroy: jest.fn(), send: jest.fn() } as any);
// Spy on createVoiceConnection so we can inspect the options joinVoiceChannel builds.
const createVoiceConnection = jest.spyOn(VoiceConnection, 'createVoiceConnection');
beforeAll(() => {
	// Prevent any real connection from being created.
	createVoiceConnection.mockImplementation(() => null as any);
});
beforeEach(() => {
	// Reset recorded calls so each test inspects calls[0] fresh.
	createVoiceConnection.mockClear();
});
describe('joinVoiceChannel', () => {
	// Calls joinVoiceChannel and asserts the options forwarded to
	// createVoiceConnection carry the expected group.
	const expectJoinWithGroup = (options: Parameters<typeof joinVoiceChannel>[0], group: string) => {
		joinVoiceChannel(options);
		expect(createVoiceConnection.mock.calls[0][0]).toMatchObject({
			channelId: options.channelId,
			guildId: options.guildId,
			group,
		});
	};
	test('Uses default group', () => {
		expectJoinWithGroup({ channelId: '123', guildId: '456', adapterCreator }, 'default');
	});
	test('Respects custom group', () => {
		expectJoinWithGroup({ channelId: '123', guildId: '456', group: 'abc', adapterCreator }, 'abc');
	});
});

View File

@@ -0,0 +1,17 @@
/**
 * Babel configuration used to transpile the TypeScript sources
 * (presumably for the Jest test runner — confirm against the test setup).
 *
 * @type {import('@babel/core').TransformOptions}
 */
module.exports = {
	// Parse all source files in strict mode.
	parserOpts: { strictMode: true },
	// Inline source maps so stack traces map back to the original sources.
	sourceMaps: 'inline',
	presets: [
		[
			'@babel/preset-env',
			{
				// Target whichever Node.js version is currently running.
				targets: { node: 'current' },
				// Emit CommonJS modules.
				modules: 'commonjs',
			},
		],
		// Strip TypeScript-specific syntax.
		'@babel/preset-typescript',
	],
};

View File

@@ -0,0 +1 @@
## [View the documentation here.](https://discord.js.org/#/docs/voice)

View File

@@ -0,0 +1,5 @@
- name: General
files:
- name: Welcome
id: welcome
path: ../../README.md

View File

@@ -0,0 +1,8 @@
# Examples
| Example | Description |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [Basic](./basic) | A simple "Hello World" TypeScript example that plays an mp3 file. Notably, it works with discord.js v12 and so it also contains an example of creating an adapter |
| [Radio Bot](./radio-bot) | A fun JavaScript example of what you can create using @discordjs/voice. A radio bot that plays output from your speakers in a Discord voice channel |
| [Music Bot](./music-bot) | A TypeScript example of a YouTube music bot. Demonstrates how to implement a queue and "good" disconnect/reconnection logic |
| [Recorder](./recorder) | An example of using voice receive to create a bot that can record audio from users |

View File

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

View File

@@ -0,0 +1,91 @@
# Basic Example
This example demonstrates how to join a voice channel and play resources, with some best-practice
guidance on ensuring you aren't left waiting indefinitely for operations to complete.
To achieve this, the example sets some fairly arbitrary time constraints for things such as joining
voice channels and audio becoming available.
## Code snippet
This code snippet doesn't include any comments for brevity. If you want to see the full source code,
check the other files in this folder!
```ts
import { Client, VoiceChannel, Intents } from 'discord.js';
import {
joinVoiceChannel,
createAudioPlayer,
createAudioResource,
entersState,
StreamType,
AudioPlayerStatus,
VoiceConnectionStatus,
} from '@discordjs/voice';
import { createDiscordJSAdapter } from './adapter';
const player = createAudioPlayer();
function playSong() {
const resource = createAudioResource('https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3', {
inputType: StreamType.Arbitrary,
});
player.play(resource);
return entersState(player, AudioPlayerStatus.Playing, 5e3);
}
async function connectToChannel(channel: VoiceChannel) {
const connection = joinVoiceChannel({
channelId: channel.id,
guildId: channel.guild.id,
adapterCreator: createDiscordJSAdapter(channel),
});
try {
await entersState(connection, VoiceConnectionStatus.Ready, 30e3);
return connection;
} catch (error) {
connection.destroy();
throw error;
}
}
const client = new Client({
ws: { intents: [Intents.FLAGS.GUILDS, Intents.FLAGS.GUILD_MESSAGES, Intents.FLAGS.GUILD_VOICE_STATES] },
});
client.login('token here');
client.on('ready', async () => {
console.log('Discord.js client is ready!');
try {
await playSong();
console.log('Song is ready to play!');
} catch (error) {
console.error(error);
}
});
client.on('message', async (message) => {
if (!message.guild) return;
if (message.content === '-join') {
const channel = message.member?.voice.channel;
if (channel) {
try {
const connection = await connectToChannel(channel);
connection.subscribe(player);
message.reply('Playing now!');
} catch (error) {
console.error(error);
}
} else {
message.reply('Join a voice channel then try again!');
}
}
});
```

View File

@@ -0,0 +1,69 @@
import { DiscordGatewayAdapterCreator, DiscordGatewayAdapterLibraryMethods } from '../../';
import { VoiceChannel, Snowflake, Client, Constants, Guild } from 'discord.js';
import { GatewayVoiceServerUpdateDispatchData, GatewayVoiceStateUpdateDispatchData } from 'discord-api-types/v9';
// Guild ID -> the lifecycle methods handed to us by @discordjs/voice for that guild.
const adapters = new Map<Snowflake, DiscordGatewayAdapterLibraryMethods>();
// Clients that already have gateway listeners attached (listeners are added once per client).
const trackedClients = new Set<Client>();
/**
 * Tracks a Discord.js client, listening to VOICE_SERVER_UPDATE and VOICE_STATE_UPDATE events.
 * Idempotent: listeners are attached at most once per client.
 *
 * @param client - The Discord.js Client to track
 */
function trackClient(client: Client) {
	if (trackedClients.has(client)) return;
	trackedClients.add(client);
	// Forward voice server updates to the adapter for the affected guild, if any.
	client.ws.on(Constants.WSEvents.VOICE_SERVER_UPDATE, (payload: GatewayVoiceServerUpdateDispatchData) => {
		adapters.get(payload.guild_id)?.onVoiceServerUpdate(payload);
	});
	// Forward voice state updates, but only those concerning the bot user itself.
	client.ws.on(Constants.WSEvents.VOICE_STATE_UPDATE, (payload: GatewayVoiceStateUpdateDispatchData) => {
		if (payload.guild_id && payload.session_id && payload.user_id === client.user?.id) {
			adapters.get(payload.guild_id)?.onVoiceStateUpdate(payload);
		}
	});
	// When a shard disconnects, destroy the adapter of every guild served by that shard.
	client.on(Constants.Events.SHARD_DISCONNECT, (_, shardID) => {
		const guilds = trackedShards.get(shardID);
		if (guilds) {
			for (const guildID of guilds.values()) {
				adapters.get(guildID)?.destroy();
			}
		}
		trackedShards.delete(shardID);
	});
}
// Shard ID -> the set of guild IDs whose adapters must be destroyed if that
// shard disconnects (see the SHARD_DISCONNECT handler in trackClient).
const trackedShards = new Map<number, Set<Snowflake>>();

/**
 * Registers a guild against its shard so its adapter can be cleaned up on
 * shard disconnect.
 *
 * @param guild - The guild to register
 */
function trackGuild(guild: Guild) {
	let guilds = trackedShards.get(guild.shardID);
	if (!guilds) {
		trackedShards.set(guild.shardID, (guilds = new Set()));
	}
	guilds.add(guild.id);
}
/**
 * Creates an adapter for a Voice Channel.
 *
 * @param channel - The channel to create the adapter for
 * @returns An adapter creator that @discordjs/voice invokes with its lifecycle methods
 */
export function createDiscordJSAdapter(channel: VoiceChannel): DiscordGatewayAdapterCreator {
	return (methods) => {
		// Register this guild's lifecycle methods and make sure its client and
		// shard are being tracked for event forwarding / cleanup.
		adapters.set(channel.guild.id, methods);
		trackClient(channel.client);
		trackGuild(channel.guild);
		return {
			sendPayload(data) {
				// Only send when the shard's gateway connection is ready; returning
				// false tells @discordjs/voice the payload could not be delivered.
				if (channel.guild.shard.status === Constants.Status.READY) {
					channel.guild.shard.send(data);
					return true;
				}
				return false;
			},
			destroy() {
				// Drop the guild's adapter registration.
				return adapters.delete(channel.guild.id);
			},
		};
	};
}

View File

@@ -0,0 +1,152 @@
import { Client, VoiceChannel, Intents } from 'discord.js';
import {
joinVoiceChannel,
createAudioPlayer,
createAudioResource,
entersState,
StreamType,
AudioPlayerStatus,
VoiceConnectionStatus,
} from '@discordjs/voice';
import { createDiscordJSAdapter } from './adapter';
/**
 * In this example, we are creating a single audio player that plays to a number of voice channels.
 * The audio player will play a single track.
 */
/**
 * Create the audio player. We will use this for all of our connections — a
 * single player can be subscribed to by many voice connections at once.
 */
const player = createAudioPlayer();
/**
 * Starts playback of a sample track on the shared audio player.
 *
 * @returns A promise that resolves once the player enters the Playing state
 * within 5 seconds, and rejects otherwise.
 */
function playSong() {
	// Build an audio resource from a freely available sample song
	// (see https://www.soundhelix.com/audio-examples).
	//
	// StreamType.Arbitrary means we are not sure of the input's format and want
	// @discordjs/voice to convert it into something playable. If the source were
	// Ogg or WebM we could use a more specific stream type instead.
	const song = createAudioResource('https://www.soundhelix.com/examples/mp3/SoundHelix-Song-1.mp3', {
		inputType: StreamType.Arbitrary,
	});

	// Attaching the resource this early is fine: the player stays idle until at
	// least one voice connection subscribes to it.
	player.play(song);

	// Helper from @discordjs/voice: resolves when the player reports Playing
	// within 5 seconds, otherwise rejects with an error.
	return entersState(player, AudioPlayerStatus.Playing, 5e3);
}
/**
 * Joins the given voice channel and waits up to 30 seconds for the connection
 * to become Ready, destroying it and rethrowing on timeout.
 *
 * @param channel - The voice channel to connect to
 * @returns The ready VoiceConnection
 */
async function connectToChannel(channel: VoiceChannel) {
	/**
	 * Here, we try to establish a connection to a voice channel. If we're already connected
	 * to this voice channel, @discordjs/voice will just return the existing connection for us!
	 */
	const connection = joinVoiceChannel({
		channelId: channel.id,
		guildId: channel.guild.id,
		adapterCreator: createDiscordJSAdapter(channel),
	});
	/**
	 * If we're dealing with a connection that isn't yet Ready, we can set a reasonable
	 * time limit before giving up. In this example, we give the voice connection 30 seconds
	 * to enter the ready state before giving up.
	 */
	try {
		/**
		 * Allow ourselves 30 seconds to join the voice channel. If we do not join within then,
		 * an error is thrown.
		 */
		await entersState(connection, VoiceConnectionStatus.Ready, 30e3);
		/**
		 * At this point, the voice connection is ready within 30 seconds! This means we can
		 * start playing audio in the voice channel. We return the connection so it can be
		 * used by the caller.
		 */
		return connection;
	} catch (error) {
		/**
		 * At this point, the voice connection has not entered the Ready state. We should make
		 * sure to destroy it, and propagate the error by throwing it, so that the calling function
		 * is aware that we failed to connect to the channel.
		 */
		connection.destroy();
		throw error;
	}
}
/**
 * Main code
 * =========
 * Here we will implement the helper functions that we have defined above.
 */
const client = new Client({
	ws: { intents: [Intents.FLAGS.GUILDS, Intents.FLAGS.GUILD_MESSAGES, Intents.FLAGS.GUILD_VOICE_STATES] },
});
// 'void' marks this promise as intentionally fire-and-forget.
void client.login('token here');
client.on('ready', async () => {
	console.log('Discord.js client is ready!');
	/**
	 * Try to get our song ready to play for when the bot joins a voice channel
	 */
	try {
		await playSong();
		console.log('Song is ready to play!');
	} catch (error) {
		/**
		 * The song isn't ready to play for some reason :(
		 */
		console.error(error);
	}
});
client.on('messageCreate', async (message) => {
	// Ignore DMs — this example only operates inside guilds.
	if (!message.guild) return;
	if (message.content === '-join') {
		const channel = message.member?.voice.channel;
		if (channel) {
			/**
			 * The user is in a voice channel, try to connect.
			 */
			try {
				const connection = await connectToChannel(channel);
				/**
				 * We have successfully connected! Now we can subscribe our connection to
				 * the player. This means that the player will play audio in the user's
				 * voice channel.
				 */
				connection.subscribe(player);
				await message.reply('Playing now!');
			} catch (error) {
				/**
				 * Unable to connect to the voice channel within 30 seconds :(
				 */
				console.error(error);
			}
		} else {
			/**
			 * The user is not in a voice channel.
			 */
			void message.reply('Join a voice channel then try again!');
		}
	}
});

View File

@@ -0,0 +1,7 @@
{
"root": true,
"extends": "../../.eslintrc.json",
"parserOptions": {
"project": "./tsconfig.eslint.json"
}
}

View File

@@ -0,0 +1,3 @@
package-lock.json
auth.json
tsconfig.tsbuildinfo

View File

@@ -0,0 +1,31 @@
# Music Bot Example
This is an example of how to create a music bot using @discordjs/voice alongside [discord.js](https://github.com/discordjs/discord.js).
The focus of this example is on how to create a robust music system using this library. The example explores error recovery, reconnection logic and implementation of a queue that won't lock up.
If you're looking to make your own music bot that is fairly simple, this example is a great place to start.
## Usage
```bash
# Clone the main repository, and then run:
$ npm install
$ npm run build
# Open this example and install dependencies
$ cd examples/music-bot
$ npm install
# Set a bot token (see auth.example.json)
$ nano auth.json
# Start the bot!
$ npm start
```
## Code structure
The bot code has been separated from the code that is specific to @discordjs/voice as much as possible. Within `src/music`, you will find code that is specific to this library and you can take inspiration from this when building your own music system.
On the other hand, `src/bot.ts` is discord.js-specific code that interacts with the music system above, as well as handling user commands given on Discord. This example uses a development build of Discord.js that supports slash commands.

View File

@@ -0,0 +1,3 @@
{
"token": "Your Discord bot token here"
}

View File

@@ -0,0 +1,28 @@
{
"name": "music-bot",
"version": "0.0.1",
"description": "An example music bot written using @discordjs/voice",
"scripts": {
"start": "npm run build && node -r tsconfig-paths/register dist/bot",
"test": "echo \"Error: no test specified\" && exit 1",
"lint": "eslint src --ext .ts",
"lint:fix": "eslint src --ext .ts --fix",
"prettier": "prettier --write **/*.{ts,js,json,yml,yaml}",
"build": "tsc",
"build:check": "tsc --noEmit --incremental false"
},
"author": "Amish Shah <contact@shah.gg>",
"license": "MIT",
"dependencies": {
"@discordjs/opus": "^0.5.0",
"discord-api-types": "^0.19.0",
"discord.js": "^13.0.0-dev.328501b.1626912223",
"libsodium-wrappers": "^0.7.9",
"youtube-dl-exec": "^1.2.4",
"ytdl-core": "^4.8.3"
},
"devDependencies": {
"tsconfig-paths": "^3.9.0",
"typescript": "~4.2.2"
}
}

View File

@@ -0,0 +1,188 @@
import Discord, { Interaction, GuildMember, Snowflake } from 'discord.js';
import {
AudioPlayerStatus,
AudioResource,
entersState,
joinVoiceChannel,
VoiceConnectionStatus,
} from '@discordjs/voice';
import { Track } from './music/track';
import { MusicSubscription } from './music/subscription';
// Load the bot token from an untracked local file (see auth.example.json).
// eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-require-imports
const { token } = require('../auth.json');
// Intents cover the guild, message, and voice-state events used below.
const client = new Discord.Client({ intents: ['GUILD_VOICE_STATES', 'GUILD_MESSAGES', 'GUILDS'] });
client.on('ready', () => console.log('Ready!'));
// This contains the setup code for creating slash commands in a guild. The owner of the bot can send "!deploy" to create them.
client.on('messageCreate', async (message) => {
	// Ignore DMs — slash commands here are registered per guild.
	if (!message.guild) return;
	// application.owner may not be populated yet; fetch lazily before checking it.
	if (!client.application?.owner) await client.application?.fetch();
	// Only the application owner may deploy the commands.
	if (message.content.toLowerCase() === '!deploy' && message.author.id === client.application?.owner?.id) {
		// Overwrite the guild's slash commands with the bot's full command set.
		await message.guild.commands.set([
			{
				name: 'play',
				description: 'Plays a song',
				options: [
					{
						name: 'song',
						type: 'STRING' as const,
						description: 'The URL of the song to play',
						required: true,
					},
				],
			},
			{
				name: 'skip',
				description: 'Skip to the next song in the queue',
			},
			{
				name: 'queue',
				description: 'See the music queue',
			},
			{
				name: 'pause',
				description: 'Pauses the song that is currently playing',
			},
			{
				name: 'resume',
				description: 'Resume playback of the current song',
			},
			{
				name: 'leave',
				description: 'Leave the voice channel',
			},
		]);
		await message.reply('Deployed!');
	}
});
/**
 * Maps guild IDs to music subscriptions, which exist if the bot has an active VoiceConnection to the guild.
 * Entries are created on demand when a user first plays a song in a guild.
 */
const subscriptions = new Map<Snowflake, MusicSubscription>();
// Handles slash command interactions
client.on('interactionCreate', async (interaction: Interaction) => {
if (!interaction.isCommand() || !interaction.guildId) return;
let subscription = subscriptions.get(interaction.guildId);
if (interaction.commandName === 'play') {
await interaction.defer();
// Extract the video URL from the command
const url = interaction.options.get('song')!.value! as string;
// If a connection to the guild doesn't already exist and the user is in a voice channel, join that channel
// and create a subscription.
if (!subscription) {
if (interaction.member instanceof GuildMember && interaction.member.voice.channel) {
const channel = interaction.member.voice.channel;
subscription = new MusicSubscription(
joinVoiceChannel({
channelId: channel.id,
guildId: channel.guild.id,
adapterCreator: channel.guild.voiceAdapterCreator,
}),
);
subscription.voiceConnection.on('error', console.warn);
subscriptions.set(interaction.guildId, subscription);
}
}
// If there is no subscription, tell the user they need to join a channel.
if (!subscription) {
await interaction.followUp('Join a voice channel and then try that again!');
return;
}
// Make sure the connection is ready before processing the user's request
try {
await entersState(subscription.voiceConnection, VoiceConnectionStatus.Ready, 20e3);
} catch (error) {
console.warn(error);
await interaction.followUp('Failed to join voice channel within 20 seconds, please try again later!');
return;
}
try {
// Attempt to create a Track from the user's video URL
const track = await Track.from(url, {
onStart() {
interaction.followUp({ content: 'Now playing!', ephemeral: true }).catch(console.warn);
},
onFinish() {
interaction.followUp({ content: 'Now finished!', ephemeral: true }).catch(console.warn);
},
onError(error) {
console.warn(error);
interaction.followUp({ content: `Error: ${error.message}`, ephemeral: true }).catch(console.warn);
},
});
// Enqueue the track and reply a success message to the user
subscription.enqueue(track);
await interaction.followUp(`Enqueued **${track.title}**`);
} catch (error) {
console.warn(error);
await interaction.followUp('Failed to play track, please try again later!');
}
} else if (interaction.commandName === 'skip') {
if (subscription) {
// Calling .stop() on an AudioPlayer causes it to transition into the Idle state. Because of a state transition
// listener defined in music/subscription.ts, transitions into the Idle state mean the next track from the queue
// will be loaded and played.
subscription.audioPlayer.stop();
await interaction.reply('Skipped song!');
} else {
await interaction.reply('Not playing in this server!');
}
} else if (interaction.commandName === 'queue') {
// Print out the current queue, including up to the next 5 tracks to be played.
if (subscription) {
const current =
subscription.audioPlayer.state.status === AudioPlayerStatus.Idle
? `Nothing is currently playing!`
: `Playing **${(subscription.audioPlayer.state.resource as AudioResource<Track>).metadata.title}**`;
const queue = subscription.queue
.slice(0, 5)
.map((track, index) => `${index + 1}) ${track.title}`)
.join('\n');
await interaction.reply(`${current}\n\n${queue}`);
} else {
await interaction.reply('Not playing in this server!');
}
} else if (interaction.commandName === 'pause') {
if (subscription) {
subscription.audioPlayer.pause();
await interaction.reply({ content: `Paused!`, ephemeral: true });
} else {
await interaction.reply('Not playing in this server!');
}
} else if (interaction.commandName === 'resume') {
if (subscription) {
subscription.audioPlayer.unpause();
await interaction.reply({ content: `Unpaused!`, ephemeral: true });
} else {
await interaction.reply('Not playing in this server!');
}
} else if (interaction.commandName === 'leave') {
if (subscription) {
subscription.voiceConnection.destroy();
subscriptions.delete(interaction.guildId);
await interaction.reply({ content: `Left channel!`, ephemeral: true });
} else {
await interaction.reply('Not playing in this server!');
}
} else {
await interaction.reply('Unknown command');
}
});
client.on('error', console.warn);
void client.login(token);

View File

@@ -0,0 +1,156 @@
import {
AudioPlayer,
AudioPlayerStatus,
AudioResource,
createAudioPlayer,
entersState,
VoiceConnection,
VoiceConnectionDisconnectReason,
VoiceConnectionStatus,
} from '@discordjs/voice';
import type { Track } from './track';
import { promisify } from 'node:util';
const wait = promisify(setTimeout);
/**
 * A MusicSubscription exists for each active VoiceConnection. Each subscription has its own audio player and queue,
 * and it also attaches logic to the audio player and voice connection for error handling and reconnection logic.
 */
export class MusicSubscription {
	public readonly voiceConnection: VoiceConnection;
	public readonly audioPlayer: AudioPlayer;
	// Tracks waiting to be played, in FIFO order.
	public queue: Track[];
	// Guards processQueue() against re-entrancy. NOTE(review): stop() sets this to true and never
	// resets it, which permanently halts queue processing afterwards — presumably intentional since
	// stop() is only reached when the connection is destroyed; confirm before reusing a subscription.
	public queueLock = false;
	// Prevents stacking multiple entersState(Ready) waits while (re)connecting.
	public readyLock = false;
	/**
	 * Creates the subscription's audio player, wires up reconnection/error handling on the
	 * voice connection, and subscribes the player to the connection.
	 *
	 * @param voiceConnection The voice connection that this subscription manages
	 */
	public constructor(voiceConnection: VoiceConnection) {
		this.voiceConnection = voiceConnection;
		this.audioPlayer = createAudioPlayer();
		this.queue = [];
		this.voiceConnection.on(
			'stateChange',
			async (_: any, newState: { status: any; reason: any; closeCode: number }) => {
				if (newState.status === VoiceConnectionStatus.Disconnected) {
					if (newState.reason === VoiceConnectionDisconnectReason.WebSocketClose && newState.closeCode === 4014) {
						/**
						 * If the WebSocket closed with a 4014 code, this means that we should not manually attempt to reconnect,
						 * but there is a chance the connection will recover itself if the reason of the disconnect was due to
						 * switching voice channels. This is also the same code for the bot being kicked from the voice channel,
						 * so we allow 5 seconds to figure out which scenario it is. If the bot has been kicked, we should destroy
						 * the voice connection.
						 */
						try {
							await entersState(this.voiceConnection, VoiceConnectionStatus.Connecting, 5_000);
							// Probably moved voice channel
						} catch {
							this.voiceConnection.destroy();
							// Probably removed from voice channel
						}
					} else if (this.voiceConnection.rejoinAttempts < 5) {
						/**
						 * The disconnect in this case is recoverable, and we also have <5 repeated attempts so we will reconnect.
						 */
						// Linear backoff: 5s, 10s, 15s, ... between rejoin attempts.
						await wait((this.voiceConnection.rejoinAttempts + 1) * 5_000);
						this.voiceConnection.rejoin();
					} else {
						/**
						 * The disconnect in this case may be recoverable, but we have no more remaining attempts - destroy.
						 */
						this.voiceConnection.destroy();
					}
				} else if (newState.status === VoiceConnectionStatus.Destroyed) {
					/**
					 * Once destroyed, stop the subscription.
					 */
					this.stop();
				} else if (
					!this.readyLock &&
					(newState.status === VoiceConnectionStatus.Connecting || newState.status === VoiceConnectionStatus.Signalling)
				) {
					/**
					 * In the Signalling or Connecting states, we set a 20 second time limit for the connection to become ready
					 * before destroying the voice connection. This stops the voice connection permanently existing in one of these
					 * states.
					 */
					this.readyLock = true;
					try {
						await entersState(this.voiceConnection, VoiceConnectionStatus.Ready, 20_000);
					} catch {
						if (this.voiceConnection.state.status !== VoiceConnectionStatus.Destroyed) this.voiceConnection.destroy();
					} finally {
						this.readyLock = false;
					}
				}
			},
		);
		// Configure audio player
		this.audioPlayer.on(
			'stateChange',
			(oldState: { status: any; resource: any }, newState: { status: any; resource: any }) => {
				if (newState.status === AudioPlayerStatus.Idle && oldState.status !== AudioPlayerStatus.Idle) {
					// If the Idle state is entered from a non-Idle state, it means that an audio resource has finished playing.
					// The queue is then processed to start playing the next track, if one is available.
					(oldState.resource as AudioResource<Track>).metadata.onFinish();
					void this.processQueue();
				} else if (newState.status === AudioPlayerStatus.Playing) {
					// If the Playing state has been entered, then a new track has started playback.
					(newState.resource as AudioResource<Track>).metadata.onStart();
				}
			},
		);
		// Surface playback errors to the track that was playing when they occurred.
		this.audioPlayer.on('error', (error: { resource: any }) =>
			(error.resource as AudioResource<Track>).metadata.onError(error),
		);
		voiceConnection.subscribe(this.audioPlayer);
	}
	/**
	 * Adds a new Track to the queue.
	 *
	 * @param track The track to add to the queue
	 */
	public enqueue(track: Track) {
		this.queue.push(track);
		void this.processQueue();
	}
	/**
	 * Stops audio playback and empties the queue.
	 */
	public stop() {
		this.queueLock = true;
		this.queue = [];
		this.audioPlayer.stop(true);
	}
	/**
	 * Attempts to play a Track from the queue.
	 */
	private async processQueue(): Promise<void> {
		// If the queue is locked (already being processed), is empty, or the audio player is already playing something, return
		if (this.queueLock || this.audioPlayer.state.status !== AudioPlayerStatus.Idle || this.queue.length === 0) {
			return;
		}
		// Lock the queue to guarantee safe access
		this.queueLock = true;
		// Take the first item from the queue. This is guaranteed to exist due to the non-empty check above.
		const nextTrack = this.queue.shift()!;
		try {
			// Attempt to convert the Track into an AudioResource (i.e. start streaming the video)
			const resource = await nextTrack.createAudioResource();
			this.audioPlayer.play(resource);
			this.queueLock = false;
		} catch (error) {
			// If an error occurred, try the next item of the queue instead
			nextTrack.onError(error as Error);
			this.queueLock = false;
			return this.processQueue();
		}
	}
}

View File

@@ -0,0 +1,113 @@
import { getInfo } from 'ytdl-core';
import { AudioResource, createAudioResource, demuxProbe } from '@discordjs/voice';
import { raw as ytdl } from 'youtube-dl-exec';
/**
* This is the data required to create a Track object.
*/
export interface TrackData {
	url: string;
	title: string;
	// Called when the track starts playing.
	onStart: () => void;
	// Called when the track finishes playing (or is skipped).
	onFinish: () => void;
	// Called when an error occurs while streaming or playing the track.
	onError: (error: Error) => void;
}
// Shared no-op used to disarm lifecycle callbacks after their first invocation (see Track.from).
// eslint-disable-next-line @typescript-eslint/no-empty-function
const noop = () => {};
/**
 * A Track represents information about a YouTube video (in this context) that can be added to a queue.
 * It contains the title and URL of the video, as well as functions onStart, onFinish, onError, that act
 * as callbacks that are triggered at certain points during the track's lifecycle.
 *
 * Rather than creating an AudioResource for each video immediately and then keeping those in a queue,
 * we use tracks as they don't pre-emptively load the videos. Instead, once a Track is taken from the
 * queue, it is converted into an AudioResource just in time for playback.
 */
export class Track implements TrackData {
	public readonly url: string;
	public readonly title: string;
	public readonly onStart: () => void;
	public readonly onFinish: () => void;
	public readonly onError: (error: Error) => void;
	// Private: instances are created via the static Track.from() factory below.
	private constructor({ url, title, onStart, onFinish, onError }: TrackData) {
		this.url = url;
		this.title = title;
		this.onStart = onStart;
		this.onFinish = onFinish;
		this.onError = onError;
	}
	/**
	 * Creates an AudioResource from this Track.
	 *
	 * Spawns a youtube-dl process that pipes the audio to stdout, probes the stream to detect
	 * its container type, and wraps it in an AudioResource with this Track as metadata.
	 */
	public createAudioResource(): Promise<AudioResource<Track>> {
		return new Promise((resolve, reject) => {
			// o: write to stdout; q: quiet; f: format preference (Opus/WebM at 48kHz, falling back
			// to best audio); r: limit download rate to 100K.
			const process = ytdl(
				this.url,
				{
					o: '-',
					q: '',
					f: 'bestaudio[ext=webm+acodec=opus+asr=48000]/bestaudio',
					r: '100K',
				},
				{ stdio: ['ignore', 'pipe', 'ignore'] },
			);
			if (!process.stdout) {
				reject(new Error('No stdout'));
				return;
			}
			const stream = process.stdout;
			// On failure, kill the child process and drain the stream so it does not leak.
			const onError = (error: Error) => {
				if (!process.killed) process.kill();
				stream.resume();
				reject(error);
			};
			process
				.once('spawn', () => {
					demuxProbe(stream)
						.then((probe: { stream: any; type: any }) =>
							resolve(createAudioResource(probe.stream, { metadata: this, inputType: probe.type })),
						)
						.catch(onError);
				})
				.catch(onError);
		});
	}
	/**
	 * Creates a Track from a video URL and lifecycle callback methods.
	 *
	 * @param url The URL of the video
	 * @param methods Lifecycle callbacks
	 *
	 * @returns The created Track
	 */
	public static async from(url: string, methods: Pick<Track, 'onStart' | 'onFinish' | 'onError'>): Promise<Track> {
		const info = await getInfo(url);
		// The methods are wrapped so that we can ensure that they are only called once.
		// Each wrapper replaces itself with a no-op on its first invocation.
		const wrappedMethods = {
			onStart() {
				wrappedMethods.onStart = noop;
				methods.onStart();
			},
			onFinish() {
				wrappedMethods.onFinish = noop;
				methods.onFinish();
			},
			onError(error: Error) {
				wrappedMethods.onError = noop;
				methods.onError(error);
			},
		};
		return new Track({
			title: info.videoDetails.title,
			url,
			...wrappedMethods,
		});
	}
}

View File

@@ -0,0 +1,3 @@
{
"extends": "./tsconfig.json"
}

View File

@@ -0,0 +1,17 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"baseUrl": ".",
"outDir": "dist",
"rootDir": "src",
"paths": {
"@discordjs/voice": ["../../"],
"@discordjs/opus": ["./node_modules/@discordjs/opus"],
"sodium": ["./node_modules/sodium"],
"libsodium-wrappers": ["./node_modules/libsodium-wrappers"],
"tweetnacl": ["./node_modules/tweetnacl"]
}
},
"include": ["src/**/*.ts"],
"exclude": [""]
}

View File

@@ -0,0 +1,2 @@
config.json
package-lock.json

View File

@@ -0,0 +1,64 @@
# Discord Radio Bot 🎧
A proof-of-concept radio bot that uses @discordjs/voice and discord.js. Streams audio from an audio output hardware device on your computer over a Discord voice channel.
**Works on:**
- Linux (via PulseAudio `pulse`)
- Windows (via DirectShow `dshow`)
## Usage
```bash
# Clone the main @discordjs/voice repo, then install dependencies and build
$ npm install
$ npm run build
# Enter this example's directory, create a config file and start!
$ cd examples/radio-bot
$ npm install
$ nano config.json
$ npm start
# Join a voice channel in Discord, then send "-join"
```
## Configuring on Windows via `dshow`
Run `ffmpeg -list_devices true -f dshow -i dummy` and observe output containing something similar:
```
DirectShow audio devices
"Stereo Mix (Realtek(R) Audio)"
Alternative name "@device_cm_{ID1}\wave_{ID2}"
```
For example, playing the above device will mirror audio from the speaker output of your machine. Your `config.json` should then be considered like so:
```json
{
"token": "discord_bot_token",
"device": "Stereo Mix (Realtek(R) Audio)",
"type": "dshow",
"maxTransmissionGap": 5000
}
```
## Configuring on Linux via `pulse`
Run `pactl list short sources` and observe output containing something similar:
```
5 alsa_output.pci.3.analog-stereo.monitor module-alsa-card.c s16le 2ch 44100Hz IDLE
```
Then configure your `config.json` with the device you'd like to use:
```json
{
"token": "discord_bot_token",
"device": "alsa_output.pci.3.analog-stereo.monitor",
"type": "pulse",
"maxTransmissionGap": 5000
}
```

View File

@@ -0,0 +1,6 @@
{
"token": "discord_bot_token",
"device": "audio_hw_device_id",
"type": "pulse",
"maxTransmissionGap": 5000
}

View File

@@ -0,0 +1,104 @@
require('module-alias/register');
const { Client } = require('discord.js');
const prism = require('prism-media');
const config = require('./config.json');
const {
NoSubscriberBehavior,
StreamType,
createAudioPlayer,
createAudioResource,
entersState,
AudioPlayerStatus,
VoiceConnectionStatus,
joinVoiceChannel,
} = require('@discordjs/voice');
// Shared audio player: keeps playing even with no subscribers, and tolerates up to
// maxTransmissionGap milliseconds of missing audio (at 20ms per frame) before going Idle.
const player = createAudioPlayer({
	behaviors: {
		noSubscriber: NoSubscriberBehavior.Play,
		maxMissedFrames: Math.round(config.maxTransmissionGap / 20),
	},
});
player.on('stateChange', (oldState, newState) => {
	if (oldState.status === AudioPlayerStatus.Idle && newState.status === AudioPlayerStatus.Playing) {
		console.log('Playing audio output on audio player');
	} else if (newState.status === AudioPlayerStatus.Idle) {
		// If playback stops for any reason (e.g. FFmpeg exiting), re-attach the capture pipeline.
		console.log('Playback has stopped. Attempting to restart.');
		attachRecorder();
	}
});
/**
 * Starts (or restarts) playback on the shared audio player by spawning an FFmpeg
 * process that captures the configured hardware audio device and transcodes it
 * to Ogg Opus (48kHz stereo).
 */
function attachRecorder() {
	// dshow requires the device to be addressed as `audio=<name>`; pulse takes the device id verbatim.
	const input = config.type === 'dshow' ? `audio=${config.device}` : config.device;
	const ffmpegArgs = [
		'-analyzeduration', '0',
		'-loglevel', '0',
		'-f', config.type,
		'-i', input,
		'-acodec', 'libopus',
		'-f', 'opus',
		'-ar', '48000',
		'-ac', '2',
	];
	const transcoder = new prism.FFmpeg({ args: ffmpegArgs });
	player.play(createAudioResource(transcoder, { inputType: StreamType.OggOpus }));
	console.log('Attached recorder - ready to go!');
}
/**
 * Joins the given voice channel and waits up to 30 seconds for the connection
 * to become Ready. On timeout, the connection is destroyed and the error rethrown.
 */
async function connectToChannel(channel) {
	const connection = joinVoiceChannel({
		channelId: channel.id,
		guildId: channel.guild.id,
		adapterCreator: channel.guild.voiceAdapterCreator,
	});
	try {
		await entersState(connection, VoiceConnectionStatus.Ready, 30_000);
	} catch (error) {
		connection.destroy();
		throw error;
	}
	return connection;
}
const client = new Client({ intents: ['GUILDS', 'GUILD_MESSAGES', 'GUILD_VOICE_STATES'] });
client.on('ready', async () => {
	console.log('discord.js client is ready!');
	// Begin capturing immediately so audio is already flowing once the bot joins a channel.
	attachRecorder();
});
// "-join": connect to the sender's voice channel and subscribe it to the shared player.
client.on('messageCreate', async (message) => {
	if (!message.guild) return;
	if (message.content === '-join') {
		const channel = message.member?.voice.channel;
		if (channel) {
			try {
				const connection = await connectToChannel(channel);
				connection.subscribe(player);
				await message.reply('Playing now!');
			} catch (error) {
				console.error(error);
			}
		} else {
			await message.reply('Join a voice channel then try again!');
		}
	}
});
void client.login(config.token);

View File

@@ -0,0 +1,32 @@
{
"name": "discord-radio-bot",
"version": "1.0.0",
"description": "A proof-of-concept radio bot for @discordjs/voice",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"start": "node index.js"
},
"keywords": [
"discord",
"radio",
"bot",
"audio",
"speakers",
"hardware",
"dj"
],
"author": "Amish Shah <amish@shah.gg>",
"license": "MIT",
"dependencies": {
"@discordjs/voice": "file:../../",
"discord.js": "^13.0.0-dev.f7eeccba4b7015496df811f10cc2da2b0fab0630",
"libsodium-wrappers": "^0.7.9",
"module-alias": "^2.2.2",
"prism-media": "^1.3.1"
},
"_moduleAliases": {
"@root": ".",
"@discordjs/voice": "../../",
"libsodium-wrappers": "./node_modules/libsodium-wrappers"
}
}

View File

@@ -0,0 +1,7 @@
{
"root": true,
"extends": "../../.eslintrc.json",
"parserOptions": {
"project": "./tsconfig.eslint.json"
}
}

View File

@@ -0,0 +1,4 @@
package-lock.json
auth.json
tsconfig.tsbuildinfo
recordings/*.ogg

View File

@@ -0,0 +1,23 @@
# 👂 Recorder Bot
This example shows how you can use the voice receive functionality in @discordjs/voice to record users in voice channels
and save the audio to local Ogg files.
## Usage
```sh-session
# Clone the main repository, and then run:
$ npm install
$ npm run build
# Open this example and install dependencies
$ cd examples/recorder
$ npm install
# Set a bot token (see auth.example.json)
$ cp auth.example.json auth.json
$ nano auth.json
# Start the bot!
$ npm start
```

View File

@@ -0,0 +1,3 @@
{
"token": "Your Discord bot token here"
}

View File

@@ -0,0 +1,28 @@
{
"name": "receiver-bot",
"version": "0.0.1",
"description": "An example receiver bot written using @discordjs/voice",
"scripts": {
"start": "npm run build && node -r tsconfig-paths/register dist/bot",
"test": "echo \"Error: no test specified\" && exit 1",
"lint": "eslint src --ext .ts",
"lint:fix": "eslint src --ext .ts --fix",
"prettier": "prettier --write **/*.{ts,js,json,yml,yaml}",
"build": "tsc",
"build:check": "tsc --noEmit --incremental false"
},
"author": "Amish Shah <contact@shah.gg>",
"license": "MIT",
"dependencies": {
"@discordjs/opus": "^0.5.3",
"discord-api-types": "^0.22.0",
"discord.js": "^13.0.1",
"libsodium-wrappers": "^0.7.9",
"node-crc": "^1.3.2",
"prism-media": "^2.0.0-alpha.0"
},
"devDependencies": {
"tsconfig-paths": "^3.10.1",
"typescript": "~4.3.5"
}
}

View File

@@ -0,0 +1,46 @@
import Discord, { Interaction } from 'discord.js';
import { getVoiceConnection } from '@discordjs/voice';
import { deploy } from './deploy';
import { interactionHandlers } from './interactions';
// eslint-disable-next-line @typescript-eslint/no-var-requires, @typescript-eslint/no-require-imports
const { token } = require('../auth.json');
const client = new Discord.Client({ intents: ['GUILD_VOICE_STATES', 'GUILD_MESSAGES', 'GUILDS'] });
client.on('ready', () => console.log('Ready!'));
// Guild-only !deploy command: lets the bot owner register the slash commands (see deploy.ts).
client.on('messageCreate', async (message) => {
	if (!message.guild) return;
	// Lazily fetch the application so that `owner` is populated before the ownership check below.
	if (!client.application?.owner) await client.application?.fetch();
	if (message.content.toLowerCase() === '!deploy' && message.author.id === client.application?.owner?.id) {
		await deploy(message.guild);
		await message.reply('Deployed!');
	}
});
/**
 * The IDs of the users that can be recorded by the bot.
 */
const recordable = new Set<string>();
// Dispatches each slash command to its handler from interactions.ts.
client.on('interactionCreate', async (interaction: Interaction) => {
	if (!interaction.isCommand() || !interaction.guildId) return;
	const handler = interactionHandlers.get(interaction.commandName);
	try {
		if (handler) {
			await handler(interaction, recordable, client, getVoiceConnection(interaction.guildId));
		} else {
			await interaction.reply('Unknown command');
		}
	} catch (error) {
		console.warn(error);
	}
});
client.on('error', console.warn);
void client.login(token);

View File

@@ -0,0 +1,42 @@
import { EndBehaviorType, VoiceReceiver } from '@discordjs/voice';
import { User } from 'discord.js';
import { createWriteStream } from 'node:fs';
import prism from 'prism-media';
import { pipeline } from 'node:stream';
/**
 * Builds a human-friendly label for a recording filename:
 * "username_discriminator" when the user is known, otherwise the raw user id.
 */
function getDisplayName(userId: string, user?: User) {
	if (!user) return userId;
	return `${user.username}_${user.discriminator}`;
}
/**
 * Subscribes to a user's Opus audio on the given receiver and records it to a
 * local Ogg file under ./recordings, ending after 100ms of silence.
 *
 * @param receiver - The voice receiver of the active connection
 * @param userId - The id of the user to record
 * @param user - The user object, if cached (used only for the filename)
 */
export function createListeningStream(receiver: VoiceReceiver, userId: string, user?: User) {
	const opusStream = receiver.subscribe(userId, {
		end: {
			behavior: EndBehaviorType.AfterSilence,
			duration: 100,
		},
	});
	// Wrap the raw Opus packets in an Ogg container (48kHz stereo).
	const oggStream = new prism.opus.OggLogicalBitstream({
		opusHead: new prism.opus.OpusHead({
			channelCount: 2,
			sampleRate: 48000,
		}),
		pageSizeControl: {
			maxPackets: 10,
		},
	});
	const filename = `./recordings/${Date.now()}-${getDisplayName(userId, user)}.ogg`;
	const out = createWriteStream(filename);
	// Bug fix: these template literals were garbled ("$(unknown)") — restore ${filename}
	// so log messages actually identify the file being recorded.
	console.log(`👂 Started recording ${filename}`);
	pipeline(opusStream, oggStream, out, (err) => {
		if (err) {
			console.warn(`❌ Error recording file ${filename} - ${err.message}`);
		} else {
			console.log(`✅ Recorded ${filename}`);
		}
	});
}

View File

@@ -0,0 +1,26 @@
import { Guild } from 'discord.js';
/**
 * Registers this bot's slash commands (/join, /record, /leave) in the given guild,
 * overwriting any previously registered guild commands.
 */
export const deploy = async (guild: Guild) => {
	const commands = [
		{
			name: 'join',
			description: 'Joins the voice channel that you are in',
		},
		{
			name: 'record',
			description: 'Enables recording for a user',
			options: [
				{
					name: 'speaker',
					type: 'USER' as const,
					description: 'The user to record',
					required: true,
				},
			],
		},
		{
			name: 'leave',
			description: 'Leave the voice channel',
		},
	];
	await guild.commands.set(commands);
};

View File

@@ -0,0 +1,92 @@
import { entersState, joinVoiceChannel, VoiceConnection, VoiceConnectionStatus } from '@discordjs/voice';
import { Client, CommandInteraction, GuildMember, Snowflake } from 'discord.js';
import { createListeningStream } from './createListeningStream';
/**
 * Handles the /join command: connects to the caller's voice channel (unless a
 * connection already exists) and starts a listening stream whenever a user in
 * the `recordable` set begins speaking.
 *
 * @param interaction - The command interaction to reply to
 * @param recordable - The set of user ids approved for recording
 * @param client - The discord.js client (used to resolve user objects)
 * @param connection - An existing voice connection for the guild, if any
 */
async function join(
	interaction: CommandInteraction,
	recordable: Set<Snowflake>,
	client: Client,
	connection?: VoiceConnection,
) {
	await interaction.deferReply();
	if (!connection) {
		// Only join if the invoking member is currently in a voice channel.
		if (interaction.member instanceof GuildMember && interaction.member.voice.channel) {
			const channel = interaction.member.voice.channel;
			connection = joinVoiceChannel({
				channelId: channel.id,
				guildId: channel.guild.id,
				// The bot must not be deafened in order to receive audio.
				selfDeaf: false,
				selfMute: true,
				adapterCreator: channel.guild.voiceAdapterCreator,
			});
		} else {
			await interaction.followUp('Join a voice channel and then try that again!');
			return;
		}
	}
	try {
		await entersState(connection, VoiceConnectionStatus.Ready, 20e3);
		const receiver = connection.receiver;
		receiver.speaking.on('start', (userId) => {
			if (recordable.has(userId)) {
				createListeningStream(receiver, userId, client.users.cache.get(userId));
			}
		});
	} catch (error) {
		console.warn(error);
		await interaction.followUp('Failed to join voice channel within 20 seconds, please try again later!');
		// Bug fix: without this return, 'Ready!' was also sent after the failure message.
		return;
	}
	await interaction.followUp('Ready!');
}
/**
 * Handles the /record command: marks the chosen user as recordable and, if they
 * are already speaking, immediately starts a listening stream for them.
 */
async function record(
	interaction: CommandInteraction,
	recordable: Set<Snowflake>,
	client: Client,
	connection?: VoiceConnection,
) {
	if (!connection) {
		await interaction.reply({ ephemeral: true, content: 'Join a voice channel and then try that again!' });
		return;
	}
	const userId = interaction.options.get('speaker')!.value! as Snowflake;
	recordable.add(userId);
	const receiver = connection.receiver;
	if (connection.receiver.speaking.users.has(userId)) {
		createListeningStream(receiver, userId, client.users.cache.get(userId));
	}
	await interaction.reply({ ephemeral: true, content: 'Listening!' });
}
/**
 * Handles the /leave command: destroys the active voice connection and clears
 * the set of recordable users.
 */
async function leave(
	interaction: CommandInteraction,
	recordable: Set<Snowflake>,
	client: Client,
	connection?: VoiceConnection,
) {
	if (!connection) {
		await interaction.reply({ ephemeral: true, content: 'Not playing in this server!' });
		return;
	}
	connection.destroy();
	recordable.clear();
	await interaction.reply({ ephemeral: true, content: 'Left the channel!' });
}
/** Signature shared by all slash-command handlers in this module. */
type InteractionHandler = (
	interaction: CommandInteraction,
	recordable: Set<Snowflake>,
	client: Client,
	connection?: VoiceConnection,
) => Promise<void>;

/**
 * Maps slash command names to their handler implementations.
 */
export const interactionHandlers = new Map<string, InteractionHandler>([
	['join', join],
	['record', record],
	['leave', leave],
]);

View File

@@ -0,0 +1,3 @@
{
"extends": "./tsconfig.json"
}

View File

@@ -0,0 +1,13 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"baseUrl": ".",
"outDir": "dist",
"paths": {
"@discordjs/voice": ["../../"],
"libsodium-wrappers": ["./node_modules/libsodium-wrappers"]
}
},
"include": ["src/*.ts"],
"exclude": [""]
}

View File

@@ -0,0 +1,11 @@
/**
 * Jest configuration for the @discordjs/voice package.
 *
 * @type {import('@jest/types').Config.InitialOptions}
 */
module.exports = {
	// Match *.spec.* and *.test.* files in ts/js (and their x variants).
	testMatch: ['**/+(*.)+(spec|test).+(ts|js)?(x)'],
	testEnvironment: 'node',
	collectCoverage: true,
	collectCoverageFrom: ['src/**/*.ts'],
	coverageDirectory: 'coverage',
	coverageReporters: ['text', 'lcov', 'clover'],
};

View File

@@ -0,0 +1,88 @@
{
"name": "@discordjs/voice",
"version": "0.7.5",
"description": "Implementation of the Discord Voice API for node.js",
"scripts": {
"build": "tsup && node scripts/postbuild.mjs",
"test": " jest --pass-with-no-tests --collect-coverage",
"lint": "eslint src --ext mjs,js,ts",
"lint:fix": "eslint src --ext mjs,js,ts --fix",
"format": "prettier --write **/*.{ts,js,json,yml,yaml}",
"docs": "typedoc --json docs/typedoc-out.json src/index.ts && node scripts/docs.mjs",
"prepublishOnly": "yarn build && yarn lint && yarn test",
"changelog": "git cliff --prepend ./CHANGELOG.md -l -c ../../cliff.toml -r ../../ --include-path './*'"
},
"main": "./dist/index.js",
"module": "./dist/index.mjs",
"typings": "./dist/index.d.ts",
"exports": {
"import": "./dist/index.mjs",
"require": "./dist/index.js"
},
"directories": {
"lib": "src",
"test": "__tests__"
},
"files": [
"dist"
],
"contributors": [
"Crawl <icrawltogo@gmail.com>",
"Amish Shah <amishshah.2k@gmail.com>",
"SpaceEEC <spaceeec@yahoo.com>",
"Vlad Frangu <kingdgrizzle@gmail.com>",
"Antonio Roman <kyradiscord@gmail.com>"
],
"license": "Apache-2.0",
"keywords": [
"discord",
"discord.js",
"audio",
"voice",
"streaming"
],
"repository": {
"type": "git",
"url": "git+https://github.com/discordjs/discord.js.git"
},
"bugs": {
"url": "https://github.com/discordjs/discord.js/issues"
},
"homepage": "https://discord.js.org",
"dependencies": {
"@types/ws": "^8.2.0",
"discord-api-types": "^0.26.0",
"prism-media": "^1.3.2",
"tiny-typed-emitter": "^2.1.0",
"tslib": "^2.3.1",
"ws": "^8.2.3"
},
"devDependencies": {
"@babel/core": "^7.16.0",
"@babel/preset-env": "^7.16.0",
"@babel/preset-typescript": "^7.16.0",
"@discordjs/ts-docgen": "^0.3.2",
"@types/jest": "^27.0.2",
"@types/node": "^16.11.7",
"@typescript-eslint/eslint-plugin": "^5.3.1",
"@typescript-eslint/parser": "^5.3.1",
"eslint": "^8.2.0",
"eslint-config-marine": "^9.0.6",
"eslint-config-prettier": "^8.3.0",
"eslint-plugin-prettier": "^4.0.0",
"jest": "^27.3.1",
"jest-websocket-mock": "^2.2.1",
"mock-socket": "^9.0.7",
"prettier": "^2.4.1",
"standard-version": "^9.3.2",
"tsup": "^5.7.0",
"typedoc": "^0.22.8",
"typescript": "^4.4.4"
},
"engines": {
"node": ">=16.0.0"
},
"publishConfig": {
"access": "public"
}
}

View File

@@ -0,0 +1,7 @@
import { runGenerator } from '@discordjs/ts-docgen';
// Converts the raw typedoc JSON output into the discord.js docs site format,
// merging in the hand-written pages referenced from docs/index.yml.
runGenerator({
	existingOutput: 'docs/typedoc-out.json',
	custom: 'docs/index.yml',
	output: 'docs/docs.json',
});

View File

@@ -0,0 +1,7 @@
import { readFile, writeFile } from 'node:fs/promises';
// Prepend a createRequire shim to the ESM bundle so that the esm output can
// still load CommonJS-only dependencies via `require` at runtime.
const banner = `import{createRequire as topLevelCreateRequire}from"module";const require=topLevelCreateRequire(import.meta.url);`;
const bundle = await readFile('./dist/index.mjs', 'utf-8');
await writeFile('./dist/index.mjs', banner + bundle);

View File

@@ -0,0 +1,189 @@
import { GatewayOpcodes } from 'discord-api-types/v9';
import type { AudioPlayer } from './audio';
import type { VoiceConnection } from './VoiceConnection';
export interface JoinConfig {
	guildId: string;
	// null indicates leaving the current channel (a disconnect payload).
	channelId: string | null;
	selfDeaf: boolean;
	selfMute: boolean;
	// Group name used to namespace connections, allowing multiple connections per guild.
	group: string;
}
/**
 * Builds a voice state update payload for the main websocket shard of a guild, used to
 * indicate joining/leaving/moving across voice channels.
 *
 * @param config - The configuration to use when joining the voice channel
 */
export function createJoinVoiceChannelPayload(config: JoinConfig) {
	const { guildId, channelId, selfDeaf, selfMute } = config;
	return {
		op: GatewayOpcodes.VoiceStateUpdate,
		d: {
			guild_id: guildId,
			channel_id: channelId,
			self_deaf: selfDeaf,
			self_mute: selfMute,
		},
	};
}
// Voice Connections
// Registry of voice connections, keyed first by group name and then by guild id.
// The 'default' group always exists.
const groups = new Map<string, Map<string, VoiceConnection>>();
groups.set('default', new Map());
/**
 * Returns the connection map for the given group name, creating and registering
 * an empty map on first use.
 */
function getOrCreateGroup(group: string) {
	let map = groups.get(group);
	if (!map) {
		map = new Map<string, VoiceConnection>();
		groups.set(group, map);
	}
	return map;
}
/**
 * Retrieves the map of group names to maps of voice connections. By default, all voice connections
 * are created under the 'default' group.
 *
 * @returns The group map
 */
export function getGroups() {
	return groups;
}
/**
 * Retrieves all the voice connections under the 'default' group. Always returns a map, since
 * the 'default' group is created eagerly at module load.
 *
 * @param group - The group to look up
 *
 * @returns The map of voice connections
 */
export function getVoiceConnections(group?: 'default'): Map<string, VoiceConnection>;
/**
 * Retrieves all the voice connections under the given group name.
 *
 * @param group - The group to look up
 *
 * @returns The map of voice connections, if the group exists
 */
export function getVoiceConnections(group: string): Map<string, VoiceConnection> | undefined;
/**
 * Retrieves all the voice connections under the given group name. Defaults to the 'default' group.
 *
 * @param group - The group to look up
 *
 * @returns The map of voice connections
 */
export function getVoiceConnections(group = 'default') {
	return groups.get(group);
}
/**
 * Finds a voice connection with the given guild id and group. Defaults to the 'default' group.
 *
 * @param guildId - The guild id of the voice connection
 * @param group - the group that the voice connection was registered with
 *
 * @returns The voice connection, if it exists
 */
export function getVoiceConnection(guildId: string, group = 'default') {
	return getVoiceConnections(group)?.get(guildId);
}
/**
 * Stops tracking a voice connection, removing it from its group's map.
 *
 * @param voiceConnection - The voice connection to untrack
 *
 * @returns `true` if the connection was tracked and removed; `false` (or `undefined`
 * if the group does not exist) otherwise
 */
export function untrackVoiceConnection(voiceConnection: VoiceConnection) {
	return getVoiceConnections(voiceConnection.joinConfig.group)?.delete(voiceConnection.joinConfig.guildId);
}
/**
 * Tracks a voice connection under its guild id, creating its group if it does not exist yet.
 *
 * @param voiceConnection - The voice connection to track
 */
export function trackVoiceConnection(voiceConnection: VoiceConnection) {
	return getOrCreateGroup(voiceConnection.joinConfig.group).set(voiceConnection.joinConfig.guildId, voiceConnection);
}
// Audio Players
// Each audio packet is 20ms long
const FRAME_LENGTH = 20;
// Timer driving the shared dispatch clock; undefined while no step is scheduled.
let audioCycleInterval: NodeJS.Timeout | undefined;
// Absolute timestamp (ms) at which the next dispatch cycle should run; -1 means the clock is stopped.
let nextTime = -1;
/**
 * A list of created audio players that are still active and haven't been destroyed.
 */
const audioPlayers: AudioPlayer[] = [];
/**
 * Called roughly every 20 milliseconds. Dispatches audio from all players, and then gets the players to prepare
 * the next audio frame.
 */
function audioCycleStep() {
	// The clock was stopped (last player removed) while this step was pending.
	if (nextTime === -1) return;
	nextTime += FRAME_LENGTH;
	const available = audioPlayers.filter((player) => player.checkPlayable());
	// eslint-disable-next-line @typescript-eslint/dot-notation
	available.forEach((player) => player['_stepDispatch']());
	prepareNextAudioFrame(available);
}
/**
 * Recursively gets the players that have been passed as parameters to prepare audio frames that can be played
 * at the start of the next cycle.
 *
 * @param players - The players that should prepare their next frame (consumed destructively via shift)
 */
function prepareNextAudioFrame(players: AudioPlayer[]) {
	const nextPlayer = players.shift();
	if (!nextPlayer) {
		// All players prepared — schedule the next cycle relative to the ideal time to avoid drift.
		if (nextTime !== -1) {
			audioCycleInterval = setTimeout(() => audioCycleStep(), nextTime - Date.now());
		}
		return;
	}
	// eslint-disable-next-line @typescript-eslint/dot-notation
	nextPlayer['_stepPrepare']();
	// setImmediate to avoid long audio player chains blocking other scheduled tasks
	setImmediate(() => prepareNextAudioFrame(players));
}
/**
 * Checks whether or not the given audio player is being driven by the data store clock.
 *
 * @param target - The target to test for
 *
 * @returns `true` if it is being tracked, `false` otherwise
 */
export function hasAudioPlayer(target: AudioPlayer) {
	return audioPlayers.some((player) => player === target);
}
/**
 * Adds an audio player to the data store tracking list, if it isn't already there.
 *
 * @param player - The player to track
 *
 * @returns The player that was passed in
 */
export function addAudioPlayer(player: AudioPlayer) {
	if (!hasAudioPlayer(player)) {
		audioPlayers.push(player);
		// First tracked player - kick off the shared dispatch loop.
		if (audioPlayers.length === 1) {
			nextTime = Date.now();
			setImmediate(() => audioCycleStep());
		}
	}
	return player;
}
/**
 * Removes an audio player from the data store tracking list, if it is present there.
 *
 * @param player - The player to stop tracking
 */
export function deleteAudioPlayer(player: AudioPlayer) {
	const index = audioPlayers.indexOf(player);
	if (index === -1) return;
	audioPlayers.splice(index, 1);
	// If other players remain, the dispatch loop keeps running.
	if (audioPlayers.length > 0) return;
	// Last tracked player removed - stop the shared dispatch loop.
	nextTime = -1;
	if (typeof audioCycleInterval !== 'undefined') clearTimeout(audioCycleInterval);
}

// ───────────── file boundary: VoiceConnection.ts (new file) ─────────────
import type { GatewayVoiceServerUpdateDispatchData, GatewayVoiceStateUpdateDispatchData } from 'discord-api-types/v9';
import type { CreateVoiceConnectionOptions } from '.';
import type { AudioPlayer } from './audio/AudioPlayer';
import type { PlayerSubscription } from './audio/PlayerSubscription';
import {
getVoiceConnection,
createJoinVoiceChannelPayload,
trackVoiceConnection,
JoinConfig,
untrackVoiceConnection,
} from './DataStore';
import type { DiscordGatewayAdapterImplementerMethods } from './util/adapter';
import { Networking, NetworkingState, NetworkingStatusCode } from './networking/Networking';
import { Awaited, noop } from './util/util';
import { TypedEmitter } from 'tiny-typed-emitter';
import { VoiceReceiver } from './receive';
import type { VoiceWebSocket, VoiceUDPSocket } from './networking';
/**
 * The various status codes a voice connection can hold at any one time.
 *
 * @remarks
 * Each status doubles as an event name that is emitted on the VoiceConnection when that status is entered.
 */
export enum VoiceConnectionStatus {
	/**
	 * Sending a packet to the main Discord gateway to indicate we want to change our voice state.
	 */
	Signalling = 'signalling',
	/**
	 * The `VOICE_SERVER_UPDATE` and `VOICE_STATE_UPDATE` packets have been received, now attempting to establish a voice connection.
	 */
	Connecting = 'connecting',
	/**
	 * A voice connection has been established, and is ready to be used.
	 */
	Ready = 'ready',
	/**
	 * The voice connection has either been severed or not established.
	 */
	Disconnected = 'disconnected',
	/**
	 * The voice connection has been destroyed and untracked, it cannot be reused.
	 */
	Destroyed = 'destroyed',
}
/**
 * The state that a VoiceConnection will be in when it is waiting to receive a VOICE_SERVER_UPDATE and
 * VOICE_STATE_UPDATE packet from Discord, provided by the adapter.
 */
export interface VoiceConnectionSignallingState {
	status: VoiceConnectionStatus.Signalling;
	/**
	 * The subscription to an audio player, if one exists.
	 */
	subscription?: PlayerSubscription;
	/**
	 * The adapter used to send payloads to the main Discord gateway.
	 */
	adapter: DiscordGatewayAdapterImplementerMethods;
}
/**
 * The reasons a voice connection can be in the disconnected state.
 *
 * @remarks
 * Found in the `reason` property of the disconnected states (see VoiceConnectionDisconnectedState).
 */
export enum VoiceConnectionDisconnectReason {
	/**
	 * When the WebSocket connection has been closed.
	 */
	WebSocketClose,
	/**
	 * When the adapter was unable to send a message requested by the VoiceConnection.
	 */
	AdapterUnavailable,
	/**
	 * When a VOICE_SERVER_UPDATE packet is received with a null endpoint, causing the connection to be severed.
	 */
	EndpointRemoved,
	/**
	 * When a manual disconnect was requested.
	 */
	Manual,
}
/**
 * The state that a VoiceConnection will be in when it is not connected to a Discord voice server nor is
 * it attempting to connect. You can manually attempt to reconnect using VoiceConnection#reconnect.
 */
export interface VoiceConnectionDisconnectedBaseState {
	status: VoiceConnectionStatus.Disconnected;
	/**
	 * The subscription to an audio player, if one exists.
	 */
	subscription?: PlayerSubscription;
	/**
	 * The adapter used to send payloads to the main Discord gateway.
	 */
	adapter: DiscordGatewayAdapterImplementerMethods;
}
/**
 * The state that a VoiceConnection will be in when it is not connected to a Discord voice server nor is
 * it attempting to connect. You can manually attempt to reconnect using VoiceConnection#reconnect.
 */
export interface VoiceConnectionDisconnectedOtherState extends VoiceConnectionDisconnectedBaseState {
	/**
	 * The reason the connection became disconnected - any reason other than a WebSocket closure.
	 */
	reason: Exclude<VoiceConnectionDisconnectReason, VoiceConnectionDisconnectReason.WebSocketClose>;
}
/**
 * The state that a VoiceConnection will be in when its WebSocket connection was closed.
 * You can manually attempt to reconnect using VoiceConnection#reconnect.
 */
export interface VoiceConnectionDisconnectedWebSocketState extends VoiceConnectionDisconnectedBaseState {
	/**
	 * Always WebSocketClose for this state - acts as the discriminant against the other disconnected state.
	 */
	reason: VoiceConnectionDisconnectReason.WebSocketClose;
	/**
	 * The close code of the WebSocket connection to the Discord voice server.
	 */
	closeCode: number;
}
/**
 * The states that a VoiceConnection can be in when it is not connected to a Discord voice server nor is
 * it attempting to connect. You can manually attempt to connect using VoiceConnection#reconnect.
 *
 * @remarks
 * Narrow on the `reason` property to access `closeCode`.
 */
export type VoiceConnectionDisconnectedState =
	| VoiceConnectionDisconnectedOtherState
	| VoiceConnectionDisconnectedWebSocketState;
/**
 * The state that a VoiceConnection will be in when it is establishing a connection to a Discord
 * voice server.
 */
export interface VoiceConnectionConnectingState {
	status: VoiceConnectionStatus.Connecting;
	/**
	 * The networking instance used to communicate with the Discord voice server.
	 */
	networking: Networking;
	/**
	 * The subscription to an audio player, if one exists.
	 */
	subscription?: PlayerSubscription;
	/**
	 * The adapter used to send payloads to the main Discord gateway.
	 */
	adapter: DiscordGatewayAdapterImplementerMethods;
}
/**
 * The state that a VoiceConnection will be in when it has an active connection to a Discord
 * voice server.
 */
export interface VoiceConnectionReadyState {
	status: VoiceConnectionStatus.Ready;
	/**
	 * The networking instance used to communicate with the Discord voice server.
	 */
	networking: Networking;
	/**
	 * The subscription to an audio player, if one exists.
	 */
	subscription?: PlayerSubscription;
	/**
	 * The adapter used to send payloads to the main Discord gateway.
	 */
	adapter: DiscordGatewayAdapterImplementerMethods;
}
/**
 * The state that a VoiceConnection will be in when it has been permanently been destroyed by the
 * user and untracked by the library. It cannot be reconnected, instead, a new VoiceConnection
 * needs to be established.
 */
export interface VoiceConnectionDestroyedState {
	// No adapter is retained here - it is destroyed during the transition into this state.
	status: VoiceConnectionStatus.Destroyed;
}
/**
 * The various states that a voice connection can be in.
 *
 * @remarks
 * Narrow using the `status` discriminant.
 */
export type VoiceConnectionState =
	| VoiceConnectionSignallingState
	| VoiceConnectionDisconnectedState
	| VoiceConnectionConnectingState
	| VoiceConnectionReadyState
	| VoiceConnectionDestroyedState;
/**
 * The events that a VoiceConnection can emit.
 */
export type VoiceConnectionEvents = {
	/**
	 * Emitted when an error is propagated from the underlying networking instance.
	 */
	error: (error: Error) => Awaited<void>;
	/**
	 * Emitted with debug messages when debugging is enabled.
	 */
	debug: (message: string) => Awaited<void>;
	/**
	 * Emitted whenever the state of the voice connection changes.
	 */
	stateChange: (oldState: VoiceConnectionState, newState: VoiceConnectionState) => Awaited<void>;
} & {
	/**
	 * Emitted (keyed by status) when the voice connection enters that status.
	 */
	[status in VoiceConnectionStatus]: (
		oldState: VoiceConnectionState,
		newState: VoiceConnectionState & { status: status },
	) => Awaited<void>;
};
/**
 * A connection to the voice server of a Guild, can be used to play audio in voice channels.
 */
export class VoiceConnection extends TypedEmitter<VoiceConnectionEvents> {
	/**
	 * The number of consecutive rejoin attempts. Initially 0, and increments for each rejoin.
	 * When a connection is successfully established, it resets to 0.
	 */
	public rejoinAttempts: number;
	/**
	 * The state of the voice connection.
	 */
	private _state: VoiceConnectionState;
	/**
	 * A configuration storing all the data needed to reconnect to a Guild's voice server.
	 *
	 * @internal
	 */
	public readonly joinConfig: JoinConfig;
	/**
	 * The two packets needed to successfully establish a voice connection. They are received
	 * from the main Discord gateway after signalling to change the voice state.
	 */
	private readonly packets: {
		server: GatewayVoiceServerUpdateDispatchData | undefined;
		state: GatewayVoiceStateUpdateDispatchData | undefined;
	};
	/**
	 * The receiver of this voice connection. You should join the voice channel with `selfDeaf` set
	 * to false for this feature to work properly.
	 */
	public readonly receiver: VoiceReceiver;
	/**
	 * The debug logger function, if debugging is enabled.
	 */
	private readonly debug: null | ((message: string) => void);
	/**
	 * Creates a new voice connection.
	 *
	 * @param joinConfig - The data required to establish the voice connection
	 * @param options - The options used to create this voice connection
	 */
	public constructor(joinConfig: JoinConfig, { debug, adapterCreator }: CreateVoiceConnectionOptions) {
		super();
		this.debug = debug ? (message: string) => this.emit('debug', message) : null;
		this.rejoinAttempts = 0;
		this.receiver = new VoiceReceiver(this);
		// Bind the networking handlers so they can be attached to and detached from
		// networking instances by reference (see the state setter).
		this.onNetworkingClose = this.onNetworkingClose.bind(this);
		this.onNetworkingStateChange = this.onNetworkingStateChange.bind(this);
		this.onNetworkingError = this.onNetworkingError.bind(this);
		this.onNetworkingDebug = this.onNetworkingDebug.bind(this);
		const adapter = adapterCreator({
			onVoiceServerUpdate: (data) => this.addServerPacket(data),
			onVoiceStateUpdate: (data) => this.addStatePacket(data),
			destroy: () => this.destroy(false),
		});
		this._state = { status: VoiceConnectionStatus.Signalling, adapter };
		this.packets = {
			server: undefined,
			state: undefined,
		};
		this.joinConfig = joinConfig;
	}
	/**
	 * The current state of the voice connection.
	 */
	public get state() {
		return this._state;
	}
	/**
	 * Updates the state of the voice connection, performing clean-up operations where necessary.
	 */
	public set state(newState: VoiceConnectionState) {
		const oldState = this._state;
		// Reflect.get is used because these properties only exist on some of the state variants.
		const oldNetworking: Networking | undefined = Reflect.get(oldState, 'networking');
		const newNetworking: Networking | undefined = Reflect.get(newState, 'networking');
		const oldSubscription: PlayerSubscription | undefined = Reflect.get(oldState, 'subscription');
		const newSubscription: PlayerSubscription | undefined = Reflect.get(newState, 'subscription');
		if (oldNetworking !== newNetworking) {
			if (oldNetworking) {
				// Swallow any late errors from the instance being dropped, then detach our handlers.
				oldNetworking.on('error', noop);
				oldNetworking.off('debug', this.onNetworkingDebug);
				oldNetworking.off('error', this.onNetworkingError);
				oldNetworking.off('close', this.onNetworkingClose);
				oldNetworking.off('stateChange', this.onNetworkingStateChange);
				oldNetworking.destroy();
			}
			if (newNetworking) this.updateReceiveBindings(newNetworking.state, oldNetworking?.state);
		}
		if (newState.status === VoiceConnectionStatus.Ready) {
			this.rejoinAttempts = 0;
		} else if (newState.status === VoiceConnectionStatus.Destroyed) {
			for (const stream of this.receiver.subscriptions.values()) {
				if (!stream.destroyed) stream.destroy();
			}
		}
		// If destroyed, the adapter can also be destroyed so it can be cleaned up by the user
		if (oldState.status !== VoiceConnectionStatus.Destroyed && newState.status === VoiceConnectionStatus.Destroyed) {
			oldState.adapter.destroy();
		}
		this._state = newState;
		if (oldSubscription && oldSubscription !== newSubscription) {
			oldSubscription.unsubscribe();
		}
		this.emit('stateChange', oldState, newState);
		if (oldState.status !== newState.status) {
			// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
			this.emit(newState.status, oldState, newState as any);
		}
	}
	/**
	 * Registers a `VOICE_SERVER_UPDATE` packet to the voice connection. This will cause it to reconnect using the
	 * new data provided in the packet.
	 *
	 * @param packet - The received `VOICE_SERVER_UPDATE` packet
	 */
	private addServerPacket(packet: GatewayVoiceServerUpdateDispatchData) {
		this.packets.server = packet;
		if (packet.endpoint) {
			this.configureNetworking();
		} else if (this.state.status !== VoiceConnectionStatus.Destroyed) {
			// A null endpoint means the current voice server went away - sever the connection.
			this.state = {
				...this.state,
				status: VoiceConnectionStatus.Disconnected,
				reason: VoiceConnectionDisconnectReason.EndpointRemoved,
			};
		}
	}
	/**
	 * Registers a `VOICE_STATE_UPDATE` packet to the voice connection. Most importantly, it stores the id of the
	 * channel that the client is connected to.
	 *
	 * @param packet - The received `VOICE_STATE_UPDATE` packet
	 */
	private addStatePacket(packet: GatewayVoiceStateUpdateDispatchData) {
		this.packets.state = packet;
		if (typeof packet.self_deaf !== 'undefined') this.joinConfig.selfDeaf = packet.self_deaf;
		if (typeof packet.self_mute !== 'undefined') this.joinConfig.selfMute = packet.self_mute;
		if (packet.channel_id) this.joinConfig.channelId = packet.channel_id;
		/*
			the channel_id being null doesn't necessarily mean it was intended for the client to leave the voice channel
			as it may have disconnected due to network failure. This will be gracefully handled once the voice websocket
			dies, and then it is up to the user to decide how they wish to handle this.
		*/
	}
	/**
	 * Called when the networking state changes, and the new ws/udp packet/message handlers need to be rebound
	 * to the new instances.
	 * @param newState - The new networking state
	 * @param oldState - The old networking state, if there is one
	 */
	private updateReceiveBindings(newState: NetworkingState, oldState?: NetworkingState) {
		const oldWs = Reflect.get(oldState ?? {}, 'ws') as VoiceWebSocket | undefined;
		const newWs = Reflect.get(newState, 'ws') as VoiceWebSocket | undefined;
		const oldUdp = Reflect.get(oldState ?? {}, 'udp') as VoiceUDPSocket | undefined;
		const newUdp = Reflect.get(newState, 'udp') as VoiceUDPSocket | undefined;
		if (oldWs !== newWs) {
			oldWs?.off('packet', this.receiver.onWsPacket);
			newWs?.on('packet', this.receiver.onWsPacket);
		}
		if (oldUdp !== newUdp) {
			oldUdp?.off('message', this.receiver.onUdpMessage);
			newUdp?.on('message', this.receiver.onUdpMessage);
		}
		this.receiver.connectionData = Reflect.get(newState, 'connectionData') ?? {};
	}
	/**
	 * Attempts to configure a networking instance for this voice connection using the received packets.
	 * Both packets are required, and any existing networking instance will be destroyed.
	 *
	 * @remarks
	 * This is called when the voice server of the connection changes, e.g. if the bot is moved into a
	 * different channel in the same guild but has a different voice server. In this instance, the connection
	 * needs to be re-established to the new voice server.
	 *
	 * The connection will transition to the Connecting state when this is called.
	 */
	public configureNetworking() {
		const { server, state } = this.packets;
		if (!server || !state || this.state.status === VoiceConnectionStatus.Destroyed || !server.endpoint) return;
		const networking = new Networking(
			{
				endpoint: server.endpoint,
				serverId: server.guild_id,
				token: server.token,
				sessionId: state.session_id,
				userId: state.user_id,
			},
			Boolean(this.debug),
		);
		networking.once('close', this.onNetworkingClose);
		networking.on('stateChange', this.onNetworkingStateChange);
		networking.on('error', this.onNetworkingError);
		networking.on('debug', this.onNetworkingDebug);
		// Assigning to state destroys any previous networking instance (see the state setter).
		this.state = {
			...this.state,
			status: VoiceConnectionStatus.Connecting,
			networking,
		};
	}
	/**
	 * Called when the networking instance for this connection closes. If the close code is 4014 (do not reconnect),
	 * the voice connection will transition to the Disconnected state which will store the close code. You can
	 * decide whether or not to reconnect when this occurs by listening for the state change and calling reconnect().
	 *
	 * @remarks
	 * If the close code was anything other than 4014, it is likely that the closing was not intended, and so the
	 * VoiceConnection will signal to Discord that it would like to rejoin the channel. This automatically attempts
	 * to re-establish the connection. This would be seen as a transition from the Ready state to the Signalling state.
	 *
	 * @param code - The close code
	 */
	private onNetworkingClose(code: number) {
		if (this.state.status === VoiceConnectionStatus.Destroyed) return;
		// If networking closes, try to connect to the voice channel again.
		if (code === 4014) {
			// Disconnected - networking is already destroyed here
			this.state = {
				...this.state,
				status: VoiceConnectionStatus.Disconnected,
				reason: VoiceConnectionDisconnectReason.WebSocketClose,
				closeCode: code,
			};
		} else {
			this.state = {
				...this.state,
				status: VoiceConnectionStatus.Signalling,
			};
			this.rejoinAttempts++;
			if (!this.state.adapter.sendPayload(createJoinVoiceChannelPayload(this.joinConfig))) {
				this.state = {
					...this.state,
					status: VoiceConnectionStatus.Disconnected,
					reason: VoiceConnectionDisconnectReason.AdapterUnavailable,
				};
			}
		}
	}
	/**
	 * Called when the state of the networking instance changes. This is used to derive the state of the voice connection.
	 *
	 * @param oldState - The previous state
	 * @param newState - The new state
	 */
	private onNetworkingStateChange(oldState: NetworkingState, newState: NetworkingState) {
		this.updateReceiveBindings(newState, oldState);
		if (oldState.code === newState.code) return;
		if (this.state.status !== VoiceConnectionStatus.Connecting && this.state.status !== VoiceConnectionStatus.Ready)
			return;
		if (newState.code === NetworkingStatusCode.Ready) {
			this.state = {
				...this.state,
				status: VoiceConnectionStatus.Ready,
			};
		} else if (newState.code !== NetworkingStatusCode.Closed) {
			this.state = {
				...this.state,
				status: VoiceConnectionStatus.Connecting,
			};
		}
	}
	/**
	 * Propagates errors from the underlying network instance.
	 *
	 * @param error - The error to propagate
	 */
	private onNetworkingError(error: Error) {
		this.emit('error', error);
	}
	/**
	 * Propagates debug messages from the underlying network instance.
	 *
	 * @param message - The debug message to propagate
	 */
	private onNetworkingDebug(message: string) {
		this.debug?.(`[NW] ${message}`);
	}
	/**
	 * Prepares an audio packet for dispatch.
	 *
	 * @param buffer - The Opus packet to prepare
	 */
	public prepareAudioPacket(buffer: Buffer) {
		const state = this.state;
		if (state.status !== VoiceConnectionStatus.Ready) return;
		return state.networking.prepareAudioPacket(buffer);
	}
	/**
	 * Dispatches the previously prepared audio packet (if any)
	 */
	public dispatchAudio() {
		const state = this.state;
		if (state.status !== VoiceConnectionStatus.Ready) return;
		return state.networking.dispatchAudio();
	}
	/**
	 * Prepares an audio packet and dispatches it immediately.
	 *
	 * @param buffer - The Opus packet to play
	 */
	public playOpusPacket(buffer: Buffer) {
		const state = this.state;
		if (state.status !== VoiceConnectionStatus.Ready) return;
		state.networking.prepareAudioPacket(buffer);
		return state.networking.dispatchAudio();
	}
	/**
	 * Destroys the VoiceConnection, preventing it from connecting to voice again.
	 * This method should be called when you no longer require the VoiceConnection to
	 * prevent memory leaks.
	 *
	 * @param adapterAvailable - Whether the adapter can be used
	 *
	 * @throws Will throw if the connection has already been destroyed
	 */
	public destroy(adapterAvailable = true) {
		if (this.state.status === VoiceConnectionStatus.Destroyed) {
			throw new Error('Cannot destroy VoiceConnection - it has already been destroyed');
		}
		// Look the connection up within its own group; omitting the group would search only the
		// 'default' group, so connections registered under other groups would never be untracked.
		if (getVoiceConnection(this.joinConfig.guildId, this.joinConfig.group) === this) {
			untrackVoiceConnection(this);
		}
		if (adapterAvailable) {
			this.state.adapter.sendPayload(createJoinVoiceChannelPayload({ ...this.joinConfig, channelId: null }));
		}
		this.state = {
			status: VoiceConnectionStatus.Destroyed,
		};
	}
	/**
	 * Disconnects the VoiceConnection, allowing the possibility of rejoining later on.
	 *
	 * @returns `true` if the connection was successfully disconnected
	 */
	public disconnect() {
		if (
			this.state.status === VoiceConnectionStatus.Destroyed ||
			this.state.status === VoiceConnectionStatus.Signalling
		) {
			return false;
		}
		this.joinConfig.channelId = null;
		if (!this.state.adapter.sendPayload(createJoinVoiceChannelPayload(this.joinConfig))) {
			this.state = {
				adapter: this.state.adapter,
				subscription: this.state.subscription,
				status: VoiceConnectionStatus.Disconnected,
				reason: VoiceConnectionDisconnectReason.AdapterUnavailable,
			};
			return false;
		}
		this.state = {
			adapter: this.state.adapter,
			reason: VoiceConnectionDisconnectReason.Manual,
			status: VoiceConnectionStatus.Disconnected,
		};
		return true;
	}
	/**
	 * Attempts to rejoin (better explanation soon:tm:)
	 *
	 * @remarks
	 * Calling this method successfully will automatically increment the `rejoinAttempts` counter,
	 * which you can use to inform whether or not you'd like to keep attempting to reconnect your
	 * voice connection.
	 *
	 * A state transition from Disconnected to Signalling will be observed when this is called.
	 */
	public rejoin(joinConfig?: Omit<JoinConfig, 'guildId' | 'group'>) {
		if (this.state.status === VoiceConnectionStatus.Destroyed) {
			return false;
		}
		const notReady = this.state.status !== VoiceConnectionStatus.Ready;
		if (notReady) this.rejoinAttempts++;
		Object.assign(this.joinConfig, joinConfig);
		if (this.state.adapter.sendPayload(createJoinVoiceChannelPayload(this.joinConfig))) {
			if (notReady) {
				this.state = {
					...this.state,
					status: VoiceConnectionStatus.Signalling,
				};
			}
			return true;
		}
		this.state = {
			adapter: this.state.adapter,
			subscription: this.state.subscription,
			status: VoiceConnectionStatus.Disconnected,
			reason: VoiceConnectionDisconnectReason.AdapterUnavailable,
		};
		return false;
	}
	/**
	 * Updates the speaking status of the voice connection. This is used when audio players are done playing audio,
	 * and need to signal that the connection is no longer playing audio.
	 *
	 * @param enabled - Whether or not to show as speaking
	 */
	public setSpeaking(enabled: boolean) {
		if (this.state.status !== VoiceConnectionStatus.Ready) return false;
		return this.state.networking.setSpeaking(enabled);
	}
	/**
	 * Subscribes to an audio player, allowing the player to play audio on this voice connection.
	 *
	 * @param player - The audio player to subscribe to
	 *
	 * @returns The created subscription
	 */
	public subscribe(player: AudioPlayer) {
		if (this.state.status === VoiceConnectionStatus.Destroyed) return;
		// eslint-disable-next-line @typescript-eslint/dot-notation
		const subscription = player['subscribe'](this);
		this.state = {
			...this.state,
			subscription,
		};
		return subscription;
	}
	/**
	 * The latest ping (in milliseconds) for the WebSocket connection and audio playback for this voice
	 * connection, if this data is available.
	 *
	 * @remarks
	 * For this data to be available, the VoiceConnection must be in the Ready state, and its underlying
	 * WebSocket connection and UDP socket must have had at least one ping-pong exchange.
	 */
	public get ping() {
		if (
			this.state.status === VoiceConnectionStatus.Ready &&
			this.state.networking.state.code === NetworkingStatusCode.Ready
		) {
			return {
				ws: this.state.networking.state.ws.ping,
				udp: this.state.networking.state.udp.ping,
			};
		}
		return {
			ws: undefined,
			udp: undefined,
		};
	}
	/**
	 * Called when a subscription of this voice connection to an audio player is removed.
	 *
	 * @param subscription - The removed subscription
	 */
	// @ts-ignore
	private onSubscriptionRemoved(subscription: PlayerSubscription) {
		if (this.state.status !== VoiceConnectionStatus.Destroyed && this.state.subscription === subscription) {
			this.state = {
				...this.state,
				subscription: undefined,
			};
		}
	}
}
/**
 * Creates a new voice connection.
 *
 * @param joinConfig - The data required to establish the voice connection
 * @param options - The options to use when joining the voice channel
 *
 * @returns The existing voice connection for the guild and group if one is alive, otherwise a new one
 */
export function createVoiceConnection(joinConfig: JoinConfig, options: CreateVoiceConnectionOptions) {
	const payload = createJoinVoiceChannelPayload(joinConfig);
	// Look the connection up within the requested group; omitting the group would search only
	// the 'default' group and could wrongly miss (or reuse) connections from other groups.
	const existing = getVoiceConnection(joinConfig.guildId, joinConfig.group);
	if (existing && existing.state.status !== VoiceConnectionStatus.Destroyed) {
		if (existing.state.status === VoiceConnectionStatus.Disconnected) {
			// Reuse the disconnected connection, applying the new channel/mute/deaf settings.
			existing.rejoin({
				channelId: joinConfig.channelId,
				selfDeaf: joinConfig.selfDeaf,
				selfMute: joinConfig.selfMute,
			});
		} else if (!existing.state.adapter.sendPayload(payload)) {
			existing.state = {
				...existing.state,
				status: VoiceConnectionStatus.Disconnected,
				reason: VoiceConnectionDisconnectReason.AdapterUnavailable,
			};
		}
		return existing;
	}
	const voiceConnection = new VoiceConnection(joinConfig, options);
	trackVoiceConnection(voiceConnection);
	if (voiceConnection.state.status !== VoiceConnectionStatus.Destroyed) {
		if (!voiceConnection.state.adapter.sendPayload(payload)) {
			voiceConnection.state = {
				...voiceConnection.state,
				status: VoiceConnectionStatus.Disconnected,
				reason: VoiceConnectionDisconnectReason.AdapterUnavailable,
			};
		}
	}
	return voiceConnection;
}

// ───────────── file boundary: audio/AudioPlayer.ts (new file) ─────────────
import { addAudioPlayer, deleteAudioPlayer } from '../DataStore';
import { Awaited, noop } from '../util/util';
import { VoiceConnection, VoiceConnectionStatus } from '../VoiceConnection';
import { AudioPlayerError } from './AudioPlayerError';
import type { AudioResource } from './AudioResource';
import { PlayerSubscription } from './PlayerSubscription';
import { TypedEmitter } from 'tiny-typed-emitter';
// The Opus "silent" frame
// NOTE(review): presumably played as filler audio while paused/buffering to avoid interpolation
// artifacts (see AudioPlayerPausedState.silencePacketsRemaining) - confirm against the player logic.
export const SILENCE_FRAME = Buffer.from([0xf8, 0xff, 0xfe]);
/**
 * Describes the behavior of the player when an audio packet is played but there are no available
 * voice connections to play to.
 *
 * @remarks
 * Configured via `CreateAudioPlayerOptions.behaviors.noSubscriber`.
 */
export enum NoSubscriberBehavior {
	/**
	 * Pauses playing the stream until a voice connection becomes available.
	 */
	Pause = 'pause',
	/**
	 * Continues to play through the resource regardless.
	 */
	Play = 'play',
	/**
	 * The player stops and enters the Idle state.
	 */
	Stop = 'stop',
}
/**
 * The various statuses that an audio player can hold. Each status doubles as an event
 * name emitted on the AudioPlayer when that status is entered.
 */
export enum AudioPlayerStatus {
	/**
	 * When there is currently no resource for the player to be playing.
	 */
	Idle = 'idle',
	/**
	 * When the player is waiting for an audio resource to become readable before transitioning to Playing.
	 */
	Buffering = 'buffering',
	/**
	 * When the player has been manually paused.
	 */
	Paused = 'paused',
	/**
	 * When the player is actively playing an audio resource.
	 */
	Playing = 'playing',
	/**
	 * When the player has paused itself. Only possible with the "pause" no subscriber behavior.
	 */
	AutoPaused = 'autopaused',
}
/**
 * Options that can be passed when creating an audio player, used to specify its behavior.
 */
export interface CreateAudioPlayerOptions {
	/**
	 * Whether to enable debug messages. Enabled unless explicitly set to `false`.
	 */
	debug?: boolean;
	/**
	 * The behaviors of the player. Defaults: `noSubscriber` is Pause, `maxMissedFrames` is 5.
	 */
	behaviors?: {
		noSubscriber?: NoSubscriberBehavior;
		maxMissedFrames?: number;
	};
}
/**
 * The state that an AudioPlayer is in when it has no resource to play. This is the starting state.
 */
export interface AudioPlayerIdleState {
	// Discriminant - no resource is held in this state.
	status: AudioPlayerStatus.Idle;
}
/**
 * The state that an AudioPlayer is in when it is waiting for a resource to become readable. Once this
 * happens, the AudioPlayer will enter the Playing state. If the resource ends/errors before this, then
 * it will re-enter the Idle state.
 */
export interface AudioPlayerBufferingState {
	status: AudioPlayerStatus.Buffering;
	/**
	 * The resource that the AudioPlayer is waiting for
	 */
	resource: AudioResource;
	/**
	 * Listener attached to the resource's playStream `readable` event.
	 */
	onReadableCallback: () => void;
	/**
	 * Listener attached to the resource's playStream `end`, `close` and `finish` events.
	 */
	onFailureCallback: () => void;
	/**
	 * Listener attached to the resource's playStream `error` event.
	 */
	onStreamError: (error: Error) => void;
}
/**
 * The state that an AudioPlayer is in when it is actively playing an AudioResource. When playback ends,
 * it will enter the Idle state.
 */
export interface AudioPlayerPlayingState {
	status: AudioPlayerStatus.Playing;
	/**
	 * The number of consecutive times that the audio resource has been unable to provide an Opus frame.
	 */
	missedFrames: number;
	/**
	 * The playback duration in milliseconds of the current audio resource. This includes filler silence packets
	 * that have been played when the resource was buffering.
	 */
	playbackDuration: number;
	/**
	 * The resource that is being played.
	 */
	resource: AudioResource;
	/**
	 * Listener attached to the resource's playStream `error` event.
	 */
	onStreamError: (error: Error) => void;
}
/**
 * The state that an AudioPlayer is in when it has either been explicitly paused by the user, or done
 * automatically by the AudioPlayer itself if there are no available subscribers.
 */
export interface AudioPlayerPausedState {
	status: AudioPlayerStatus.Paused | AudioPlayerStatus.AutoPaused;
	/**
	 * How many silence packets still need to be played to avoid audio interpolation due to the stream suddenly pausing.
	 */
	silencePacketsRemaining: number;
	/**
	 * The playback duration in milliseconds of the current audio resource. This includes filler silence packets
	 * that have been played when the resource was buffering.
	 */
	playbackDuration: number;
	/**
	 * The current resource of the audio player.
	 */
	resource: AudioResource;
	/**
	 * Listener attached to the resource's playStream `error` event.
	 */
	onStreamError: (error: Error) => void;
}
/**
 * The various states that the player can be in.
 *
 * @remarks
 * Narrow using the `status` discriminant.
 */
export type AudioPlayerState =
	| AudioPlayerIdleState
	| AudioPlayerBufferingState
	| AudioPlayerPlayingState
	| AudioPlayerPausedState;
/**
 * The events that an AudioPlayer can emit.
 */
export type AudioPlayerEvents = {
	/**
	 * Emitted when an error occurs while playing an audio resource.
	 */
	error: (error: AudioPlayerError) => Awaited<void>;
	/**
	 * Emitted with debug messages when debugging is enabled.
	 */
	debug: (message: string) => Awaited<void>;
	/**
	 * Emitted whenever the state of the player changes.
	 */
	stateChange: (oldState: AudioPlayerState, newState: AudioPlayerState) => Awaited<void>;
	/**
	 * Emitted when a voice connection is newly subscribed to this player.
	 */
	subscribe: (subscription: PlayerSubscription) => Awaited<void>;
	/**
	 * Emitted when a voice connection is unsubscribed from this player.
	 */
	unsubscribe: (subscription: PlayerSubscription) => Awaited<void>;
} & {
	/**
	 * Emitted (keyed by status) when the player enters that status.
	 */
	[status in AudioPlayerStatus]: (
		oldState: AudioPlayerState,
		newState: AudioPlayerState & { status: status },
	) => Awaited<void>;
};
/**
* Used to play audio resources (i.e. tracks, streams) to voice connections.
*
* @remarks
* Audio players are designed to be re-used - even if a resource has finished playing, the player itself
* can still be used.
*
* The AudioPlayer drives the timing of playback, and therefore is unaffected by voice connections
* becoming unavailable. Its behavior in these scenarios can be configured.
*/
export class AudioPlayer extends TypedEmitter<AudioPlayerEvents> {
/**
* The state that the AudioPlayer is in.
*/
private _state: AudioPlayerState;
/**
* A list of VoiceConnections that are registered to this AudioPlayer. The player will attempt to play audio
* to the streams in this list.
*/
private readonly subscribers: PlayerSubscription[] = [];
/**
* The behavior that the player should follow when it enters certain situations.
*/
private readonly behaviors: {
noSubscriber: NoSubscriberBehavior;
maxMissedFrames: number;
};
/**
* The debug logger function, if debugging is enabled.
*/
private readonly debug: null | ((message: string) => void);
/**
* Creates a new AudioPlayer.
*/
public constructor(options: CreateAudioPlayerOptions = {}) {
super();
this._state = { status: AudioPlayerStatus.Idle };
this.behaviors = {
noSubscriber: NoSubscriberBehavior.Pause,
maxMissedFrames: 5,
...options.behaviors,
};
this.debug = options.debug === false ? null : (message: string) => this.emit('debug', message);
}
/**
* A list of subscribed voice connections that can currently receive audio to play.
*/
public get playable() {
return this.subscribers
.filter(({ connection }) => connection.state.status === VoiceConnectionStatus.Ready)
.map(({ connection }) => connection);
}
/**
* Subscribes a VoiceConnection to the audio player's play list. If the VoiceConnection is already subscribed,
* then the existing subscription is used.
*
* @remarks
* This method should not be directly called. Instead, use VoiceConnection#subscribe.
*
* @param connection - The connection to subscribe
*
* @returns The new subscription if the voice connection is not yet subscribed, otherwise the existing subscription
*/
// @ts-ignore
private subscribe(connection: VoiceConnection) {
const existingSubscription = this.subscribers.find((subscription) => subscription.connection === connection);
if (!existingSubscription) {
const subscription = new PlayerSubscription(connection, this);
this.subscribers.push(subscription);
setImmediate(() => this.emit('subscribe', subscription));
return subscription;
}
return existingSubscription;
}
/**
* Unsubscribes a subscription - i.e. removes a voice connection from the play list of the audio player.
*
* @remarks
* This method should not be directly called. Instead, use PlayerSubscription#unsubscribe.
*
* @param subscription - The subscription to remove
*
* @returns Whether or not the subscription existed on the player and was removed
*/
// @ts-ignore
private unsubscribe(subscription: PlayerSubscription) {
const index = this.subscribers.indexOf(subscription);
const exists = index !== -1;
if (exists) {
this.subscribers.splice(index, 1);
subscription.connection.setSpeaking(false);
this.emit('unsubscribe', subscription);
}
return exists;
}
/**
* The state that the player is in.
*/
public get state() {
return this._state;
}
/**
 * Sets a new state for the player, performing clean-up operations where necessary.
 *
 * Responsibilities on every transition: destroy a resource that is being dropped, detach
 * buffering listeners, signal "stop speaking" and detach from the global timer when going
 * Idle, attach to the global timer when a resource is present, and emit `stateChange`
 * plus a status-named event when the status (or active resource) changed.
 */
public set state(newState: AudioPlayerState) {
	const oldState = this._state;
	// Only non-Idle states carry a resource; read it reflectively so Idle states yield undefined.
	const newResource = Reflect.get(newState, 'resource') as AudioResource | undefined;
	if (oldState.status !== AudioPlayerStatus.Idle && oldState.resource !== newResource) {
		// The old resource is being dropped: swallow any late stream errors with noop,
		// detach the active error handler, orphan the resource, then destroy its stream.
		oldState.resource.playStream.on('error', noop);
		oldState.resource.playStream.off('error', oldState.onStreamError);
		oldState.resource.audioPlayer = undefined;
		oldState.resource.playStream.destroy();
		oldState.resource.playStream.read(); // required to ensure buffered data is drained, prevents memory leak
	}
	// When leaving the Buffering state (or buffering a new resource), then remove the event listeners from it
	if (
		oldState.status === AudioPlayerStatus.Buffering &&
		(newState.status !== AudioPlayerStatus.Buffering || newState.resource !== oldState.resource)
	) {
		oldState.resource.playStream.off('end', oldState.onFailureCallback);
		oldState.resource.playStream.off('close', oldState.onFailureCallback);
		oldState.resource.playStream.off('finish', oldState.onFailureCallback);
		oldState.resource.playStream.off('readable', oldState.onReadableCallback);
	}
	// transitioning into an idle should ensure that connections stop speaking
	if (newState.status === AudioPlayerStatus.Idle) {
		this._signalStopSpeaking();
		deleteAudioPlayer(this);
	}
	// attach to the global audio player timer
	if (newResource) {
		addAudioPlayer(this);
	}
	// playing -> playing state changes should still transition if a resource changed (seems like it would be useful!)
	const didChangeResources =
		oldState.status !== AudioPlayerStatus.Idle &&
		newState.status === AudioPlayerStatus.Playing &&
		oldState.resource !== newState.resource;
	this._state = newState;
	this.emit('stateChange', oldState, this._state);
	if (oldState.status !== newState.status || didChangeResources) {
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		this.emit(newState.status, oldState, this._state as any);
	}
	this.debug?.(`state change:\nfrom ${stringifyState(oldState)}\nto ${stringifyState(newState)}`);
}
/**
 * Plays a new resource on the player. If the player is already playing a resource, the existing resource is destroyed
 * (it cannot be reused, even in another player) and is replaced with the new resource.
 *
 * @remarks
 * The player will transition to the Playing state once playback begins, and will return to the Idle state once
 * playback is ended.
 *
 * If the player was previously playing a resource and this method is called, the player will not transition to the
 * Idle state during the swap over.
 *
 * @param resource - The resource to play
 *
 * @throws Will throw if attempting to play an audio resource that has already ended, or is being played by another player
 */
public play<T>(resource: AudioResource<T>) {
	if (resource.ended) {
		throw new Error('Cannot play a resource that has already ended.');
	}
	if (resource.audioPlayer) {
		// Playing the same resource on the same player again is a no-op.
		if (resource.audioPlayer === this) {
			return;
		}
		throw new Error('Resource is already being played by another audio player.');
	}
	// Claim the resource for this player so no other player can take it.
	resource.audioPlayer = this;
	// Attach error listeners to the stream that will propagate the error and then return to the Idle
	// state if the resource is still being used.
	const onStreamError = (error: Error) => {
		if (this.state.status !== AudioPlayerStatus.Idle) {
			/**
			 * Emitted when there is an error emitted from the audio resource played by the audio player
			 *
			 * @event AudioPlayer#error
			 * @type {AudioPlayerError}
			 */
			this.emit('error', new AudioPlayerError(error, this.state.resource));
		}
		// Only transition to Idle if the erroring resource is still the active one.
		if (this.state.status !== AudioPlayerStatus.Idle && this.state.resource === resource) {
			this.state = {
				status: AudioPlayerStatus.Idle,
			};
		}
	};
	resource.playStream.once('error', onStreamError);
	if (resource.started) {
		// Data is already readable, so playback can begin immediately.
		this.state = {
			status: AudioPlayerStatus.Playing,
			missedFrames: 0,
			playbackDuration: 0,
			resource,
			onStreamError,
		};
	} else {
		// Transitions Buffering -> Playing once the stream becomes readable.
		const onReadableCallback = () => {
			if (this.state.status === AudioPlayerStatus.Buffering && this.state.resource === resource) {
				this.state = {
					status: AudioPlayerStatus.Playing,
					missedFrames: 0,
					playbackDuration: 0,
					resource,
					onStreamError,
				};
			}
		};
		// Transitions Buffering -> Idle if the stream ends/closes before ever becoming readable.
		const onFailureCallback = () => {
			if (this.state.status === AudioPlayerStatus.Buffering && this.state.resource === resource) {
				this.state = {
					status: AudioPlayerStatus.Idle,
				};
			}
		};
		resource.playStream.once('readable', onReadableCallback);
		resource.playStream.once('end', onFailureCallback);
		resource.playStream.once('close', onFailureCallback);
		resource.playStream.once('finish', onFailureCallback);
		// Not yet readable - wait in the Buffering state until one of the callbacks above fires.
		this.state = {
			status: AudioPlayerStatus.Buffering,
			resource,
			onReadableCallback,
			onFailureCallback,
			onStreamError,
		};
	}
}
/**
 * Pauses playback of the current resource, if any.
 *
 * @param interpolateSilence - If true, the player will play 5 packets of silence after pausing to prevent audio glitches
 *
 * @returns `true` if the player was successfully paused, otherwise `false`
 */
public pause(interpolateSilence = true) {
	// Only a Playing player can be paused.
	if (this.state.status === AudioPlayerStatus.Playing) {
		this.state = {
			...this.state,
			status: AudioPlayerStatus.Paused,
			silencePacketsRemaining: interpolateSilence ? 5 : 0,
		};
		return true;
	}
	return false;
}
/**
 * Unpauses playback of the current resource, if any.
 *
 * @returns `true` if the player was successfully unpaused, otherwise `false`
 */
public unpause() {
	// Only a Paused player can be unpaused (AutoPaused resumes on its own).
	if (this.state.status === AudioPlayerStatus.Paused) {
		this.state = {
			...this.state,
			status: AudioPlayerStatus.Playing,
			missedFrames: 0,
		};
		return true;
	}
	return false;
}
/**
 * Stops playback of the current resource and destroys the resource. The player will either transition to the Idle state,
 * or remain in its current state until the silence padding frames of the resource have been played.
 *
 * @param force - If true, will force the player to enter the Idle state even if the resource has silence padding frames
 *
 * @returns `true` if the player will come to a stop, otherwise `false`
 */
public stop(force = false) {
	const current = this.state;
	if (current.status === AudioPlayerStatus.Idle) return false;
	if (force || current.resource.silencePaddingFrames === 0) {
		// Go straight to Idle - no silence padding is played.
		this.state = {
			status: AudioPlayerStatus.Idle,
		};
	} else if (current.resource.silenceRemaining === -1) {
		// Begin counting down the silence padding frames; the player goes Idle once they are exhausted.
		current.resource.silenceRemaining = current.resource.silencePaddingFrames;
	}
	return true;
}
/**
 * Checks whether the underlying resource (if any) is playable (readable)
 *
 * @returns `true` if the resource is playable, otherwise `false`
 */
public checkPlayable() {
	const state = this._state;
	switch (state.status) {
		case AudioPlayerStatus.Idle:
		case AudioPlayerStatus.Buffering:
			// No active resource to play yet.
			return false;
		default:
			// If the stream has been destroyed or is no longer readable, then transition to the Idle state.
			if (!state.resource.readable) {
				this.state = {
					status: AudioPlayerStatus.Idle,
				};
				return false;
			}
			return true;
	}
}
/**
 * Called roughly every 20ms by the global audio player timer. Dispatches any audio packets that are buffered
 * by the active connections of this audio player.
 */
// @ts-ignore
private _stepDispatch() {
	const { status } = this._state;
	// Nothing to dispatch while idle or still buffering.
	if (status === AudioPlayerStatus.Idle || status === AudioPlayerStatus.Buffering) return;
	// Send out the packets that were prepared during the previous cycle.
	for (const connection of this.playable) {
		connection.dispatchAudio();
	}
}
/**
 * Called roughly every 20ms by the global audio player timer. Attempts to read an audio packet from the
 * underlying resource of the stream, and then has all the active connections of the audio player prepare it
 * (encrypt it, append header data) so that it is ready to play at the start of the next cycle.
 */
// @ts-ignore
private _stepPrepare() {
	const state = this._state;
	// Guard against the Idle state
	if (state.status === AudioPlayerStatus.Idle || state.status === AudioPlayerStatus.Buffering) return;
	// List of connections that can receive the packet
	const playable = this.playable;
	/* If the player was previously in the AutoPaused state, check to see whether there are newly available
	connections, allowing us to transition out of the AutoPaused state back into the Playing state */
	if (state.status === AudioPlayerStatus.AutoPaused && playable.length > 0) {
		// NOTE(review): the local `state` still refers to the old AutoPaused state object after this
		// assignment, so the (auto)paused branch below still executes for the remainder of this cycle.
		this.state = {
			...state,
			status: AudioPlayerStatus.Playing,
			missedFrames: 0,
		};
	}
	/* If the player is (auto)paused, check to see whether silence packets should be played and
	set a timeout to begin the next cycle, ending the current cycle here. */
	if (state.status === AudioPlayerStatus.Paused || state.status === AudioPlayerStatus.AutoPaused) {
		if (state.silencePacketsRemaining > 0) {
			state.silencePacketsRemaining--;
			this._preparePacket(SILENCE_FRAME, playable, state);
			// After the final interpolated silence packet, tell Discord we stopped speaking.
			if (state.silencePacketsRemaining === 0) {
				this._signalStopSpeaking();
			}
		}
		return;
	}
	// If there are no available connections in this cycle, observe the configured "no subscriber" behavior.
	if (playable.length === 0) {
		if (this.behaviors.noSubscriber === NoSubscriberBehavior.Pause) {
			this.state = {
				...state,
				status: AudioPlayerStatus.AutoPaused,
				silencePacketsRemaining: 5,
			};
			return;
		} else if (this.behaviors.noSubscriber === NoSubscriberBehavior.Stop) {
			this.stop(true);
		}
	}
	/**
	 * Attempt to read an Opus packet from the resource. If there isn't an available packet,
	 * play a silence packet. If there are 5 consecutive cycles with failed reads, then the
	 * playback will end.
	 */
	const packet: Buffer | null = state.resource.read();
	// eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
	if (state.status === AudioPlayerStatus.Playing) {
		if (packet) {
			this._preparePacket(packet, playable, state);
			state.missedFrames = 0;
		} else {
			// No packet available - interpolate silence and count the missed frame.
			this._preparePacket(SILENCE_FRAME, playable, state);
			state.missedFrames++;
			if (state.missedFrames >= this.behaviors.maxMissedFrames) {
				this.stop();
			}
		}
	}
}
/**
 * Signals to all the subscribed connections that they should send a packet to Discord indicating
 * they are no longer speaking. Called once playback of a resource ends.
 */
private _signalStopSpeaking() {
	for (const { connection } of this.subscribers) {
		connection.setSpeaking(false);
	}
}
/**
 * Instructs the given connections to each prepare this packet to be played at the start of the
 * next cycle.
 *
 * @param packet - The Opus packet to be prepared by each receiver
 * @param receivers - The connections that should play this packet
 * @param state - The (auto/paused or playing) state whose playback duration should advance
 */
private _preparePacket(
	packet: Buffer,
	receivers: VoiceConnection[],
	state: AudioPlayerPlayingState | AudioPlayerPausedState,
) {
	// Each prepared packet accounts for 20ms of audio.
	state.playbackDuration += 20;
	for (const connection of receivers) {
		connection.prepareAudioPacket(packet);
	}
}
}
/**
 * Stringifies an AudioPlayerState instance for debug output.
 *
 * @param state - The state to stringify
 */
function stringifyState(state: AudioPlayerState) {
	// Replace heavyweight fields with booleans indicating their presence.
	const summary = {
		...state,
		resource: Reflect.has(state, 'resource'),
		stepTimeout: Reflect.has(state, 'stepTimeout'),
	};
	return JSON.stringify(summary);
}
/**
 * Creates a new AudioPlayer to be used.
 *
 * @param options - Configurable options for the new audio player
 *
 * @returns The new audio player
 */
export function createAudioPlayer(options?: CreateAudioPlayerOptions) {
	return new AudioPlayer(options);
}

View File

@@ -0,0 +1,18 @@
import type { AudioResource } from './AudioResource';
/**
 * An error emitted by an AudioPlayer. Contains an attached resource to aid with
 * debugging and identifying where the error came from.
 */
export class AudioPlayerError extends Error {
	public constructor(
		error: Error,
		/**
		 * The resource associated with the audio player at the time the error was thrown.
		 */
		public readonly resource: AudioResource,
	) {
		super(error.message);
		// Masquerade as the original error so that consumers see its name and stack trace.
		this.name = error.name;
		this.stack = error.stack;
	}
}

View File

@@ -0,0 +1,285 @@
import { Edge, findPipeline, StreamType, TransformerType } from './TransformerGraph';
import { pipeline, Readable } from 'node:stream';
import { noop } from '../util/util';
import prism from 'prism-media';
import { AudioPlayer, SILENCE_FRAME } from './AudioPlayer';
/**
 * Options that are set when creating a new audio resource.
 *
 * @template T - the type for the metadata (if any) of the audio resource
 */
export interface CreateAudioResourceOptions<T> {
	/**
	 * The type of the input stream. Defaults to `StreamType.Arbitrary`.
	 * Ignored when the input is given as a string, in which case FFmpeg is always used.
	 */
	inputType?: StreamType;
	/**
	 * Optional metadata that can be attached to the resource (e.g. track title, random id).
	 * This is useful for identification purposes when the resource is passed around in events.
	 * See {@link AudioResource.metadata}
	 */
	metadata?: T;
	/**
	 * Whether or not inline volume should be enabled. If enabled, you will be able to change the volume
	 * of the stream on-the-fly. However, this also increases the performance cost of playback. Defaults to `false`.
	 */
	inlineVolume?: boolean;
	/**
	 * The number of silence frames to append to the end of the resource's audio stream, to prevent interpolation glitches.
	 * Defaults to 5.
	 */
	silencePaddingFrames?: number;
}
/**
 * Represents an audio resource that can be played by an audio player.
 *
 * @template T - the type for the metadata (if any) of the audio resource
 */
export class AudioResource<T = unknown> {
	/**
	 * An object-mode Readable stream that emits Opus packets. This is what is played by audio players.
	 */
	public readonly playStream: Readable;
	/**
	 * The pipeline used to convert the input stream into a playable format. For example, this may
	 * contain an FFmpeg component for arbitrary inputs, and it may contain a VolumeTransformer component
	 * for resources with inline volume transformation enabled.
	 */
	public readonly edges: readonly Edge[];
	/**
	 * Optional metadata that can be used to identify the resource.
	 */
	public metadata: T;
	/**
	 * If the resource was created with inline volume transformation enabled, then this will be a
	 * prism-media VolumeTransformer. You can use this to alter the volume of the stream.
	 */
	public readonly volume?: prism.VolumeTransformer;
	/**
	 * If using an Opus encoder to create this audio resource, then this will be a prism-media opus.Encoder.
	 * You can use this to control settings such as bitrate, FEC, PLP.
	 */
	public readonly encoder?: prism.opus.Encoder;
	/**
	 * The audio player that the resource is subscribed to, if any.
	 */
	public audioPlayer?: AudioPlayer;
	/**
	 * The playback duration of this audio resource, given in milliseconds.
	 */
	public playbackDuration = 0;
	/**
	 * Whether or not the stream for this resource has started (data has become readable)
	 */
	public started = false;
	/**
	 * The number of silence frames to append to the end of the resource's audio stream, to prevent interpolation glitches.
	 */
	public readonly silencePaddingFrames: number;
	/**
	 * The number of remaining silence frames to play. If -1, the frames have not yet started playing.
	 */
	public silenceRemaining = -1;
	/**
	 * Creates a new audio resource.
	 *
	 * @param edges - The transformer pipeline edges used to produce the playable stream
	 * @param streams - The streams of the pipeline; the final stream is what gets played
	 * @param metadata - User-provided metadata to attach to the resource
	 * @param silencePaddingFrames - The number of silence frames to play once the stream ends
	 */
	public constructor(edges: readonly Edge[], streams: readonly Readable[], metadata: T, silencePaddingFrames: number) {
		this.edges = edges;
		// With multiple streams, pipe them into one another and play the final stream of the pipeline.
		this.playStream = streams.length > 1 ? (pipeline(streams, noop) as any as Readable) : streams[0];
		this.metadata = metadata;
		this.silencePaddingFrames = silencePaddingFrames;
		// Expose the volume transformer / Opus encoder components (if present) for user control.
		for (const stream of streams) {
			if (stream instanceof prism.VolumeTransformer) {
				this.volume = stream;
			} else if (stream instanceof prism.opus.Encoder) {
				this.encoder = stream;
			}
		}
		this.playStream.once('readable', () => (this.started = true));
	}
	/**
	 * Whether this resource is readable. If the underlying resource is no longer readable, this will still return true
	 * while there are silence padding frames left to play.
	 */
	public get readable() {
		if (this.silenceRemaining === 0) return false;
		const real = this.playStream.readable;
		if (!real) {
			// The underlying stream is done - begin counting down the silence padding frames.
			if (this.silenceRemaining === -1) this.silenceRemaining = this.silencePaddingFrames;
			return this.silenceRemaining !== 0;
		}
		return real;
	}
	/**
	 * Whether this resource has ended or not.
	 */
	public get ended() {
		return this.playStream.readableEnded || this.playStream.destroyed || this.silenceRemaining === 0;
	}
	/**
	 * Attempts to read an Opus packet from the audio resource. If a packet is available, the playbackDuration
	 * is incremented.
	 *
	 * @remarks
	 * It is advisable to check that the playStream is readable before calling this method. While no runtime
	 * errors will be thrown, you should check that the resource is still available before attempting to
	 * read from it.
	 *
	 * @internal
	 */
	public read(): Buffer | null {
		if (this.silenceRemaining === 0) {
			// Silence padding exhausted - the resource has fully ended.
			return null;
		} else if (this.silenceRemaining > 0) {
			// Currently playing out the silence padding frames.
			this.silenceRemaining--;
			return SILENCE_FRAME;
		}
		const packet: Buffer | null = this.playStream.read();
		if (packet) {
			// Each Opus packet accounts for 20ms of audio.
			this.playbackDuration += 20;
		}
		return packet;
	}
}
/**
 * Ensures that a path contains at least one volume transforming component.
 *
 * @param path - The path to validate constraints on
 */
export const VOLUME_CONSTRAINT = (path: Edge[]) => path.some((edge) => edge.type === TransformerType.InlineVolume);
/**
 * A constraint that accepts any path.
 */
export const NO_CONSTRAINT = () => true;
/**
 * Tries to infer the type of a stream to aid with transcoder pipelining.
 *
 * @param stream - The stream to infer the type of
 *
 * @returns The inferred stream type, and whether the stream already transforms volume
 */
export function inferStreamType(stream: Readable): {
	streamType: StreamType;
	hasVolume: boolean;
} {
	if (stream instanceof prism.opus.Encoder) return { streamType: StreamType.Opus, hasVolume: false };
	if (stream instanceof prism.opus.Decoder) return { streamType: StreamType.Raw, hasVolume: false };
	if (stream instanceof prism.VolumeTransformer) return { streamType: StreamType.Raw, hasVolume: true };
	if (stream instanceof prism.opus.OggDemuxer) return { streamType: StreamType.Opus, hasVolume: false };
	if (stream instanceof prism.opus.WebmDemuxer) return { streamType: StreamType.Opus, hasVolume: false };
	// Unknown stream - it will need FFmpeg to be converted into a known format.
	return { streamType: StreamType.Arbitrary, hasVolume: false };
}
/**
 * Creates an audio resource that can be played by audio players, attaching the given metadata.
 *
 * @remarks
 * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
 *
 * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
 * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
 * Opus transcoders, and Ogg/WebM demuxers.
 *
 * @param input - The resource to play
 * @param options - Configurable options for creating the resource; `metadata` is required for non-nullish T
 *
 * @template T - the type for the metadata (if any) of the audio resource
 */
export function createAudioResource<T>(
	input: string | Readable,
	options: CreateAudioResourceOptions<T> &
		Pick<
			T extends null | undefined ? CreateAudioResourceOptions<T> : Required<CreateAudioResourceOptions<T>>,
			'metadata'
		>,
): AudioResource<T extends null | undefined ? null : T>;
/**
 * Creates an audio resource that can be played by audio players, without metadata.
 *
 * @remarks
 * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
 *
 * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
 * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
 * Opus transcoders, and Ogg/WebM demuxers.
 *
 * @param input - The resource to play
 * @param options - Configurable options for creating the resource
 *
 * @template T - the type for the metadata (if any) of the audio resource
 */
export function createAudioResource<T extends null | undefined>(
	input: string | Readable,
	options?: Omit<CreateAudioResourceOptions<T>, 'metadata'>,
): AudioResource<null>;
/**
 * Creates an audio resource that can be played by audio players (implementation).
 *
 * @remarks
 * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
 *
 * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
 * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
 * Opus transcoders, and Ogg/WebM demuxers.
 *
 * @param input - The resource to play
 * @param options - Configurable options for creating the resource
 *
 * @template T - the type for the metadata (if any) of the audio resource
 */
export function createAudioResource<T>(
	input: string | Readable,
	options: CreateAudioResourceOptions<T> = {},
): AudioResource<T> {
	let inputType = options.inputType;
	let needsInlineVolume = Boolean(options.inlineVolume);
	// string inputs can only be used with FFmpeg
	if (typeof input === 'string') {
		inputType = StreamType.Arbitrary;
	} else if (typeof inputType === 'undefined') {
		const analysis = inferStreamType(input);
		inputType = analysis.streamType;
		// If the stream already contains a volume transformer, no extra one is needed.
		needsInlineVolume = needsInlineVolume && !analysis.hasVolume;
	}
	const transformerPipeline = findPipeline(inputType, needsInlineVolume ? VOLUME_CONSTRAINT : NO_CONSTRAINT);
	if (transformerPipeline.length === 0) {
		if (typeof input === 'string') throw new Error(`Invalid pipeline constructed for string resource '${input}'`);
		// No adjustments required
		return new AudioResource<T>([], [input], (options.metadata ?? null) as T, options.silencePaddingFrames ?? 5);
	}
	const streams = transformerPipeline.map((edge) => edge.transformer(input));
	// Readable inputs head the pipeline; string inputs are consumed directly by FFmpeg.
	if (typeof input !== 'string') streams.unshift(input);
	return new AudioResource<T>(
		transformerPipeline,
		streams,
		(options.metadata ?? null) as T,
		options.silencePaddingFrames ?? 5,
	);
}

View File

@@ -0,0 +1,33 @@
/* eslint-disable @typescript-eslint/dot-notation */
import type { VoiceConnection } from '../VoiceConnection';
import type { AudioPlayer } from './AudioPlayer';
/**
 * Represents a subscription of a voice connection to an audio player, allowing
 * the audio player to play audio on the voice connection.
 */
export class PlayerSubscription {
	public constructor(
		/**
		 * The voice connection of this subscription.
		 */
		public readonly connection: VoiceConnection,
		/**
		 * The audio player of this subscription.
		 */
		public readonly player: AudioPlayer,
	) {}
	/**
	 * Unsubscribes the connection from the audio player, meaning that the
	 * audio player cannot stream audio to it until a new subscription is made.
	 */
	public unsubscribe() {
		// Notify the connection first, then remove this subscription from the player's play list.
		this.connection['onSubscriptionRemoved'](this);
		this.player['unsubscribe'](this);
	}
}

View File

@@ -0,0 +1,264 @@
import type { Readable } from 'node:stream';
import prism from 'prism-media';
/**
* This module creates a Transformer Graph to figure out what the most efficient way
* of transforming the input stream into something playable would be.
*/
// FFmpeg arguments that convert any input into 48kHz stereo s16le PCM.
const FFMPEG_PCM_ARGUMENTS = ['-analyzeduration', '0', '-loglevel', '0', '-f', 's16le', '-ar', '48000', '-ac', '2'];
// FFmpeg arguments that convert any input into 48kHz stereo Ogg Opus audio (requires libopus).
const FFMPEG_OPUS_ARGUMENTS = [
	'-analyzeduration',
	'0',
	'-loglevel',
	'0',
	'-acodec',
	'libopus',
	'-f',
	'opus',
	'-ar',
	'48000',
	'-ac',
	'2',
];
/**
 * The different types of stream that can exist within the pipeline. These are the nodes of the
 * transformer graph, and are also used as the `inputType` when creating an audio resource.
 *
 * @remarks
 * - `Arbitrary` - the type of the stream at this point is unknown.
 * - `Raw` - the stream at this point is s16le PCM.
 * - `OggOpus` - the stream at this point is Opus audio encoded in an Ogg wrapper.
 * - `WebmOpus` - the stream at this point is Opus audio encoded in a WebM wrapper.
 * - `Opus` - the stream at this point is Opus audio, and the stream is in object-mode. This is ready to play.
 */
export enum StreamType {
	Arbitrary = 'arbitrary',
	Raw = 'raw',
	OggOpus = 'ogg/opus',
	WebmOpus = 'webm/opus',
	Opus = 'opus',
}
/**
 * The different types of transformers that can exist within the pipeline.
 * Each one labels an edge of the transformer graph.
 */
export enum TransformerType {
	FFmpegPCM = 'ffmpeg pcm', // FFmpeg converting input into raw PCM
	FFmpegOgg = 'ffmpeg ogg', // FFmpeg converting input into Ogg Opus
	OpusEncoder = 'opus encoder', // encodes raw PCM into Opus packets
	OpusDecoder = 'opus decoder', // decodes Opus packets into raw PCM
	OggOpusDemuxer = 'ogg/opus demuxer', // extracts Opus packets from an Ogg wrapper
	WebmOpusDemuxer = 'webm/opus demuxer', // extracts Opus packets from a WebM wrapper
	InlineVolume = 'volume transformer', // applies on-the-fly volume transformation to raw PCM
}
/**
 * Represents a pathway from one stream type to another using a transformer.
 */
export interface Edge {
	/** The node this edge starts at. */
	from: Node;
	/** The node that the transformer converts the stream into. */
	to: Node;
	/** The relative cost of applying this transformer; findPath minimizes the total. */
	cost: number;
	/** Creates the transformer stream. String inputs are only meaningful for FFmpeg edges. */
	transformer: (input: string | Readable) => Readable;
	/** The type of transformer used on this edge. */
	type: TransformerType;
}
/**
 * Represents a type of stream within the graph, e.g. an Opus stream, or a stream of raw audio.
 */
export class Node {
	/**
	 * The outbound edges from this node.
	 */
	public readonly edges: Edge[] = [];
	public constructor(
		/**
		 * The type of stream for this node.
		 */
		public readonly type: StreamType,
	) {}
	/**
	 * Creates an outbound edge from this node.
	 *
	 * @param edge - The edge to create
	 */
	public addEdge(edge: Omit<Edge, 'from'>) {
		// Stamp this node in as the origin of the edge.
		this.edges.push({ ...edge, from: this });
	}
}
// Create a node for each stream type
const NODES = new Map<StreamType, Node>(
	Object.values(StreamType).map((streamType): [StreamType, Node] => [streamType, new Node(streamType)]),
);
/**
 * Gets a node from its stream type.
 *
 * @param type - The stream type of the target node
 *
 * @throws If no node exists for the given stream type
 */
export function getNode(type: StreamType) {
	const node = NODES.get(type);
	if (node) return node;
	throw new Error(`Node type '${type}' does not exist!`);
}
// Raw PCM can be encoded into playable object-mode Opus.
getNode(StreamType.Raw).addEdge({
	type: TransformerType.OpusEncoder,
	to: getNode(StreamType.Opus),
	cost: 1.5,
	transformer: () => new prism.opus.Encoder({ rate: 48000, channels: 2, frameSize: 960 }),
});
// Opus can be decoded back into raw PCM (e.g. so inline volume can be applied).
getNode(StreamType.Opus).addEdge({
	type: TransformerType.OpusDecoder,
	to: getNode(StreamType.Raw),
	cost: 1.5,
	transformer: () => new prism.opus.Decoder({ rate: 48000, channels: 2, frameSize: 960 }),
});
// Ogg and WebM wrappers can be demuxed cheaply into playable Opus.
getNode(StreamType.OggOpus).addEdge({
	type: TransformerType.OggOpusDemuxer,
	to: getNode(StreamType.Opus),
	cost: 1,
	transformer: () => new prism.opus.OggDemuxer(),
});
getNode(StreamType.WebmOpus).addEdge({
	type: TransformerType.WebmOpusDemuxer,
	to: getNode(StreamType.Opus),
	cost: 1,
	transformer: () => new prism.opus.WebmDemuxer(),
});
// FFmpeg can convert any input (including string file paths/URLs) into raw PCM.
const FFMPEG_PCM_EDGE: Omit<Edge, 'from'> = {
	type: TransformerType.FFmpegPCM,
	to: getNode(StreamType.Raw),
	cost: 2,
	transformer: (input) =>
		new prism.FFmpeg({
			args: typeof input === 'string' ? ['-i', input, ...FFMPEG_PCM_ARGUMENTS] : FFMPEG_PCM_ARGUMENTS,
		}),
};
getNode(StreamType.Arbitrary).addEdge(FFMPEG_PCM_EDGE);
getNode(StreamType.OggOpus).addEdge(FFMPEG_PCM_EDGE);
getNode(StreamType.WebmOpus).addEdge(FFMPEG_PCM_EDGE);
// Raw PCM can have an inline volume transformer applied to it (cheap self-loop).
getNode(StreamType.Raw).addEdge({
	type: TransformerType.InlineVolume,
	to: getNode(StreamType.Raw),
	cost: 0.5,
	transformer: () => new prism.VolumeTransformer({ type: 's16le' }),
});
// Try to enable FFmpeg Ogg optimizations
/**
 * Checks whether the installed FFmpeg build was compiled with libopus, which allows
 * transcoding directly to Ogg Opus rather than going through raw PCM.
 */
function canEnableFFmpegOptimizations(): boolean {
	try {
		return prism.FFmpeg.getInfo().output.includes('--enable-libopus');
	} catch {}
	// FFmpeg is unavailable or probing failed - fall back to the PCM-only graph.
	return false;
}
if (canEnableFFmpegOptimizations()) {
	const FFMPEG_OGG_EDGE: Omit<Edge, 'from'> = {
		type: TransformerType.FFmpegOgg,
		to: getNode(StreamType.OggOpus),
		cost: 2,
		transformer: (input) =>
			new prism.FFmpeg({
				args: typeof input === 'string' ? ['-i', input, ...FFMPEG_OPUS_ARGUMENTS] : FFMPEG_OPUS_ARGUMENTS,
			}),
	};
	getNode(StreamType.Arbitrary).addEdge(FFMPEG_OGG_EDGE);
	// Include Ogg and WebM as well in case they have different sampling rates or are mono instead of stereo
	// at the moment, this will not do anything. However, if/when detection for correct Opus headers is
	// implemented, this will help inform the voice engine that it is able to transcode the audio.
	getNode(StreamType.OggOpus).addEdge(FFMPEG_OGG_EDGE);
	getNode(StreamType.WebmOpus).addEdge(FFMPEG_OGG_EDGE);
}
/**
 * Represents a step in the path from node A to node B.
 */
interface Step {
	/**
	 * The next step, if any.
	 */
	next?: Step;
	/**
	 * The total cost of the steps after this step. Infinity when no valid path exists from here.
	 */
	cost: number;
	/**
	 * The edge associated with this step. Absent on the terminal (goal) step.
	 */
	edge?: Edge;
}
/**
 * Finds the shortest cost path from node A to node B using a depth-limited recursive search.
 *
 * @param from - The start node
 * @param constraints - Extra validation for a potential solution. Takes a path, returns true if the path is valid
 * @param goal - The target node
 * @param path - The running path
 * @param depth - The number of remaining recursions
 *
 * @returns The first step of the cheapest valid path; its cost is Infinity when no path was found
 */
function findPath(
	from: Node,
	constraints: (path: Edge[]) => boolean,
	goal = getNode(StreamType.Opus),
	path: Edge[] = [],
	depth = 5,
): Step {
	// Reaching the goal only terminates the search if the accumulated path satisfies the constraints;
	// otherwise the search continues (e.g. to pick up a required volume transformer self-loop).
	if (from === goal && constraints(path)) {
		return { cost: 0 };
	} else if (depth === 0) {
		// Recursion limit reached without finding a valid path.
		return { cost: Infinity };
	}
	let currentBest: Step | undefined = undefined;
	for (const edge of from.edges) {
		// Prune edges whose own cost already exceeds the best total found so far.
		if (currentBest && edge.cost > currentBest.cost) continue;
		const next = findPath(edge.to, constraints, goal, [...path, edge], depth - 1);
		const cost = edge.cost + next.cost;
		if (!currentBest || cost < currentBest.cost) {
			currentBest = { cost, edge, next };
		}
	}
	return currentBest ?? { cost: Infinity };
}
/**
* Takes the solution from findPath and assembles it into a list of edges.
*
* @param step - The first step of the path
*/
function constructPipeline(step: Step) {
const edges = [];
let current: Step | undefined = step;
while (current?.edge) {
edges.push(current.edge);
current = current.next;
}
return edges;
}
/**
 * Finds the lowest-cost pipeline to convert the input stream type into an Opus stream.
 *
 * @param from - The stream type to start from
 * @param constraint - Extra constraints that may be imposed on potential solution
 */
export function findPipeline(from: StreamType, constraint: (path: Edge[]) => boolean) {
	const start = getNode(from);
	return constructPipeline(findPath(start, constraint));
}

View File

@@ -0,0 +1,390 @@
/* eslint-disable @typescript-eslint/dot-notation */
import { AudioResource } from '../../audio/AudioResource';
import { createAudioPlayer, AudioPlayerStatus, AudioPlayer, SILENCE_FRAME } from '../AudioPlayer';
import { Readable } from 'node:stream';
import { addAudioPlayer, deleteAudioPlayer } from '../../DataStore';
import { NoSubscriberBehavior } from '../..';
import { VoiceConnection, VoiceConnectionStatus } from '../../VoiceConnection';
import { once } from 'node:events';
import { AudioPlayerError } from '../AudioPlayerError';
// Replace heavyweight collaborators with jest auto-mocks so the player can be tested in isolation.
jest.mock('../../DataStore');
jest.mock('../../VoiceConnection');
jest.mock('../AudioPlayerError');
// Typed aliases for the auto-mocked module members used throughout the tests.
const addAudioPlayerMock = addAudioPlayer as unknown as jest.Mock<typeof addAudioPlayer>;
const deleteAudioPlayerMock = deleteAudioPlayer as unknown as jest.Mock<typeof deleteAudioPlayer>;
const AudioPlayerErrorMock = AudioPlayerError as unknown as jest.Mock<typeof AudioPlayerError>;
const VoiceConnectionMock = VoiceConnection as unknown as jest.Mock<VoiceConnection>;
/**
 * An endless generator of 3-byte Opus silence frames (one fresh Buffer per iteration).
 */
function* silence() {
	for (;;) {
		yield Buffer.from([0xf8, 0xff, 0xfe]);
	}
}
/**
 * Creates a mocked VoiceConnection in the Signalling state with a stubbed adapter.
 * Its subscribe() delegates to the player's private subscribe method, mirroring the real flow.
 */
function createVoiceConnectionMock() {
	const connection = new VoiceConnectionMock();
	connection.state = {
		status: VoiceConnectionStatus.Signalling,
		adapter: {
			sendPayload: jest.fn(),
			destroy: jest.fn(),
		},
	};
	connection.subscribe = jest.fn((player) => player['subscribe'](connection));
	return connection;
}
/**
 * Returns a promise that resolves on the next tick of the event loop.
 */
function wait() {
	return new Promise((next) => process.nextTick(next));
}
/**
 * Resolves once the given resource has started (its stream has become readable).
 *
 * @param resource - The resource to wait on
 */
async function started(resource: AudioResource) {
	// Poll once per tick until data has become readable.
	for (;;) {
		if (resource.started) return resource;
		await wait();
	}
}
// The player under test; force-stopped after each test so nothing lingers between tests.
let player: AudioPlayer | undefined;
beforeEach(() => {
	// Reset call counts and instances on all shared mocks so each test starts clean.
	AudioPlayerErrorMock.mockReset();
	VoiceConnectionMock.mockReset();
	addAudioPlayerMock.mockReset();
	deleteAudioPlayerMock.mockReset();
});
afterEach(() => {
	player?.stop(true);
});
describe('State transitions', () => {
	test('Starts in Idle state', () => {
		player = createAudioPlayer();
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		// An Idle player must not be registered with the audio cycle.
		expect(addAudioPlayerMock).toBeCalledTimes(0);
		expect(deleteAudioPlayerMock).toBeCalledTimes(0);
	});
	test('Playing resource with pausing and resuming', async () => {
		// Call AudioResource constructor directly to avoid analysing pipeline for stream
		const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
		player = createAudioPlayer();
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		// Pause and unpause should not affect the status of an Idle player
		expect(player.pause()).toBe(false);
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		expect(player.unpause()).toBe(false);
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		expect(addAudioPlayerMock).toBeCalledTimes(0);
		player.play(resource);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		expect(addAudioPlayerMock).toBeCalledTimes(1);
		// Expect pause() to return true and transition to paused state
		expect(player.pause()).toBe(true);
		expect(player.state.status).toBe(AudioPlayerStatus.Paused);
		// further calls to pause() should be unsuccessful
		expect(player.pause()).toBe(false);
		expect(player.state.status).toBe(AudioPlayerStatus.Paused);
		// unpause() should transition back to Playing
		expect(player.unpause()).toBe(true);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		// further calls to unpause() should be unsuccessful
		expect(player.unpause()).toBe(false);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		// The audio player should not have been deleted throughout these changes
		expect(deleteAudioPlayerMock).toBeCalledTimes(0);
	});
	test('Playing to Stopping', async () => {
		const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
		player = createAudioPlayer();
		// stop() shouldn't do anything in Idle state
		expect(player.stop(true)).toBe(false);
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		player.play(resource);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		expect(addAudioPlayerMock).toBeCalledTimes(1);
		expect(deleteAudioPlayerMock).toBeCalledTimes(0);
		// A non-forced stop keeps the player in Playing while silence padding drains.
		expect(player.stop()).toBe(true);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		expect(addAudioPlayerMock).toBeCalledTimes(1);
		expect(deleteAudioPlayerMock).toBeCalledTimes(0);
		expect(resource.silenceRemaining).toBe(5);
	});
	test('Buffering to Playing', async () => {
		const resource = new AudioResource([], [Readable.from(silence())], null, 5);
		player = createAudioPlayer();
		player.play(resource);
		// A resource that has not yet started puts the player into Buffering.
		expect(player.state.status).toBe(AudioPlayerStatus.Buffering);
		await started(resource);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		expect(addAudioPlayerMock).toHaveBeenCalled();
		expect(deleteAudioPlayerMock).not.toHaveBeenCalled();
	});
	describe('NoSubscriberBehavior transitions', () => {
		test('NoSubscriberBehavior.Pause', async () => {
			const connection = createVoiceConnectionMock();
			if (connection.state.status !== VoiceConnectionStatus.Signalling) {
				throw new Error('Voice connection should have been Signalling');
			}
			const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
			player = createAudioPlayer({ behaviors: { noSubscriber: NoSubscriberBehavior.Pause } });
			connection.subscribe(player);
			player.play(resource);
			expect(player.checkPlayable()).toBe(true);
			// With no Ready subscriber, a Pause-behavior player auto-pauses.
			player['_stepPrepare']();
			expect(player.state.status).toBe(AudioPlayerStatus.AutoPaused);
			connection.state = {
				...connection.state,
				status: VoiceConnectionStatus.Ready,
				networking: null as any,
			};
			expect(player.checkPlayable()).toBe(true);
			// Once the subscriber becomes Ready, playback resumes automatically.
			player['_stepPrepare']();
			expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		});
		test('NoSubscriberBehavior.Play', async () => {
			const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
			player = createAudioPlayer({ behaviors: { noSubscriber: NoSubscriberBehavior.Play } });
			player.play(resource);
			expect(player.checkPlayable()).toBe(true);
			player['_stepPrepare']();
			expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		});
		test('NoSubscriberBehavior.Stop', async () => {
			const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
			player = createAudioPlayer({ behaviors: { noSubscriber: NoSubscriberBehavior.Stop } });
			player.play(resource);
			expect(addAudioPlayerMock).toBeCalledTimes(1);
			expect(player.checkPlayable()).toBe(true);
			// Stop-behavior players give up entirely when nobody is subscribed.
			player['_stepPrepare']();
			expect(player.state.status).toBe(AudioPlayerStatus.Idle);
			expect(deleteAudioPlayerMock).toBeCalledTimes(1);
		});
	});
	test('Normal playing state', async () => {
		const connection = createVoiceConnectionMock();
		if (connection.state.status !== VoiceConnectionStatus.Signalling) {
			throw new Error('Voice connection should have been Signalling');
		}
		connection.state = {
			...connection.state,
			status: VoiceConnectionStatus.Ready,
			networking: null as any,
		};
		const buffer = Buffer.from([1, 2, 4, 8]);
		const resource = await started(
			new AudioResource([], [Readable.from([buffer, buffer, buffer, buffer, buffer])], null, 5),
		);
		player = createAudioPlayer();
		connection.subscribe(player);
		player.play(resource);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		expect(addAudioPlayerMock).toBeCalledTimes(1);
		expect(player.checkPlayable()).toBe(true);
		// Run through a few packet cycles
		for (let i = 1; i <= 5; i++) {
			player['_stepDispatch']();
			expect(connection.dispatchAudio).toHaveBeenCalledTimes(i);
			await wait(); // Wait for the stream
			player['_stepPrepare']();
			expect(connection.prepareAudioPacket).toHaveBeenCalledTimes(i);
			expect(connection.prepareAudioPacket).toHaveBeenLastCalledWith(buffer);
			expect(player.state.status).toBe(AudioPlayerStatus.Playing);
			if (player.state.status === AudioPlayerStatus.Playing) {
				// Each 20ms packet advances the playback duration accordingly.
				expect(player.state.playbackDuration).toStrictEqual(i * 20);
			}
		}
		// Expect silence to be played
		player['_stepDispatch']();
		expect(connection.dispatchAudio).toHaveBeenCalledTimes(6);
		await wait();
		player['_stepPrepare']();
		const prepareAudioPacket = connection.prepareAudioPacket as unknown as jest.Mock<
			typeof connection.prepareAudioPacket
		>;
		expect(prepareAudioPacket).toHaveBeenCalledTimes(6);
		expect(prepareAudioPacket.mock.calls[5][0]).toEqual(silence().next().value);
		player.stop(true);
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		expect(connection.setSpeaking).toBeCalledTimes(1);
		expect(connection.setSpeaking).toHaveBeenLastCalledWith(false);
		expect(deleteAudioPlayerMock).toHaveBeenCalledTimes(1);
	});
	test('stop() causes resource to use silence padding frames', async () => {
		const connection = createVoiceConnectionMock();
		if (connection.state.status !== VoiceConnectionStatus.Signalling) {
			throw new Error('Voice connection should have been Signalling');
		}
		connection.state = {
			...connection.state,
			status: VoiceConnectionStatus.Ready,
			networking: null as any,
		};
		const buffer = Buffer.from([1, 2, 4, 8]);
		const resource = await started(
			new AudioResource([], [Readable.from([buffer, buffer, buffer, buffer, buffer])], null, 5),
		);
		player = createAudioPlayer();
		connection.subscribe(player);
		player.play(resource);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		expect(addAudioPlayerMock).toBeCalledTimes(1);
		expect(player.checkPlayable()).toBe(true);
		player.stop();
		// Run through a few packet cycles
		for (let i = 1; i <= 5; i++) {
			player['_stepDispatch']();
			expect(connection.dispatchAudio).toHaveBeenCalledTimes(i);
			await wait(); // Wait for the stream
			player['_stepPrepare']();
			expect(connection.prepareAudioPacket).toHaveBeenCalledTimes(i);
			// After stop(), the 5 configured silence padding frames are sent instead of audio.
			expect(connection.prepareAudioPacket).toHaveBeenLastCalledWith(SILENCE_FRAME);
			expect(player.state.status).toBe(AudioPlayerStatus.Playing);
			if (player.state.status === AudioPlayerStatus.Playing) {
				expect(player.state.playbackDuration).toStrictEqual(i * 20);
			}
		}
		await wait();
		expect(player.checkPlayable()).toBe(false);
		const prepareAudioPacket = connection.prepareAudioPacket as unknown as jest.Mock<
			typeof connection.prepareAudioPacket
		>;
		expect(prepareAudioPacket).toHaveBeenCalledTimes(5);
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		expect(connection.setSpeaking).toBeCalledTimes(1);
		expect(connection.setSpeaking).toHaveBeenLastCalledWith(false);
		expect(deleteAudioPlayerMock).toHaveBeenCalledTimes(1);
	});
	test('Plays silence 5 times for unreadable stream before quitting', async () => {
		const connection = createVoiceConnectionMock();
		if (connection.state.status !== VoiceConnectionStatus.Signalling) {
			throw new Error('Voice connection should have been Signalling');
		}
		connection.state = {
			...connection.state,
			status: VoiceConnectionStatus.Ready,
			networking: null as any,
		};
		const resource = await started(new AudioResource([], [Readable.from([1])], null, 0));
		// Drain the single chunk so the stream becomes unreadable.
		resource.playStream.read();
		player = createAudioPlayer({ behaviors: { maxMissedFrames: 5 } });
		connection.subscribe(player);
		player.play(resource);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		expect(addAudioPlayerMock).toBeCalledTimes(1);
		expect(player.checkPlayable()).toBe(true);
		const prepareAudioPacket = connection.prepareAudioPacket as unknown as jest.Mock<
			typeof connection.prepareAudioPacket
		>;
		// Run through a few packet cycles
		for (let i = 1; i <= 5; i++) {
			expect(player.state.status).toBe(AudioPlayerStatus.Playing);
			if (player.state.status !== AudioPlayerStatus.Playing) throw new Error('Error');
			expect(player.state.playbackDuration).toStrictEqual((i - 1) * 20);
			expect(player.state.missedFrames).toBe(i - 1);
			player['_stepDispatch']();
			expect(connection.dispatchAudio).toHaveBeenCalledTimes(i);
			player['_stepPrepare']();
			expect(prepareAudioPacket).toHaveBeenCalledTimes(i);
			expect(prepareAudioPacket.mock.calls[i - 1][0]).toEqual(silence().next().value);
		}
		// After maxMissedFrames consecutive misses, the player gives up.
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
		expect(connection.setSpeaking).toBeCalledTimes(1);
		expect(connection.setSpeaking).toHaveBeenLastCalledWith(false);
		expect(deleteAudioPlayerMock).toHaveBeenCalledTimes(1);
	});
	test('checkPlayable() transitions to Idle for unreadable stream', async () => {
		const resource = await started(new AudioResource([], [Readable.from([1])], null, 0));
		player = createAudioPlayer();
		player.play(resource);
		expect(player.checkPlayable()).toBe(true);
		expect(player.state.status).toBe(AudioPlayerStatus.Playing);
		// Drain the stream until it ends.
		for (let i = 0; i < 3; i++) {
			resource.playStream.read();
			await wait();
		}
		expect(resource.playStream.readableEnded).toBe(true);
		expect(player.checkPlayable()).toBe(false);
		expect(player.state.status).toBe(AudioPlayerStatus.Idle);
	});
});
test('play() throws when playing a resource that has already ended', async () => {
	const resource = await started(new AudioResource([], [Readable.from([1])], null, 5));
	player = createAudioPlayer();
	player.play(resource);
	expect(player.state.status).toBe(AudioPlayerStatus.Playing);
	// Drain the stream until it ends.
	for (let i = 0; i < 3; i++) {
		resource.playStream.read();
		await wait();
	}
	expect(resource.playStream.readableEnded).toBe(true);
	player.stop(true);
	expect(player.state.status).toBe(AudioPlayerStatus.Idle);
	// An ended resource cannot be replayed.
	expect(() => player.play(resource)).toThrow();
});
test('Propagates errors from streams', async () => {
	const resource = await started(new AudioResource([], [Readable.from(silence())], null, 5));
	player = createAudioPlayer();
	player.play(resource);
	expect(player.state.status).toBe(AudioPlayerStatus.Playing);
	const error = new Error('AudioPlayer test error');
	// Emit asynchronously so the player's error listener is already attached.
	process.nextTick(() => resource.playStream.emit('error', error));
	const res = await once(player, 'error');
	const playerError = res[0] as AudioPlayerError;
	expect(playerError).toBeInstanceOf(AudioPlayerError);
	expect(AudioPlayerErrorMock).toHaveBeenCalledWith(error, resource);
	// A stream error stops playback entirely.
	expect(player.state.status).toBe(AudioPlayerStatus.Idle);
});

View File

@@ -0,0 +1,124 @@
import { opus, VolumeTransformer } from 'prism-media';
import { PassThrough, Readable } from 'node:stream';
import { SILENCE_FRAME } from '../AudioPlayer';
import { AudioResource, createAudioResource, NO_CONSTRAINT, VOLUME_CONSTRAINT } from '../AudioResource';
import { Edge, findPipeline as _findPipeline, StreamType, TransformerType } from '../TransformerGraph';
// Mock the native/stream-heavy dependencies so resources can be created
// without real audio transcoding libraries.
jest.mock('prism-media');
jest.mock('../TransformerGraph');

// Resolves once the next-tick queue has been flushed.
function wait() {
	return new Promise((resolve) => process.nextTick(resolve));
}

// Resolves with the resource once it has begun playback.
async function started(resource: AudioResource) {
	while (!resource.started) {
		await wait();
	}
	return resource;
}

const findPipeline = _findPipeline as unknown as jest.MockedFunction<typeof _findPipeline>;

beforeAll(() => {
	// Replace the real pipeline search with a fixed single-step FFmpeg pipeline,
	// appending an inline-volume transformer only when the volume constraint is used.
	findPipeline.mockImplementation((from: StreamType, constraint: (path: Edge[]) => boolean) => {
		const base = [
			{
				cost: 1,
				transformer: () => new PassThrough(),
				type: TransformerType.FFmpegPCM,
			},
		];
		if (constraint === VOLUME_CONSTRAINT) {
			base.push({
				cost: 1,
				// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
				transformer: () => new VolumeTransformer({} as any),
				type: TransformerType.InlineVolume,
			});
		}
		return base as any[];
	});
});

beforeEach(() => {
	// Clear call history so each test can assert findPipeline arguments independently.
	findPipeline.mockClear();
});
describe('createAudioResource', () => {
	test('Creates a resource from string path', () => {
		const resource = createAudioResource('mypath.mp3');
		// String inputs are always treated as Arbitrary (piped through FFmpeg).
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, NO_CONSTRAINT);
		expect(resource.volume).toBeUndefined();
	});
	test('Creates a resource from string path (volume)', () => {
		const resource = createAudioResource('mypath.mp3', { inlineVolume: true });
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, VOLUME_CONSTRAINT);
		expect(resource.volume).toBeInstanceOf(VolumeTransformer);
	});
	test('Only infers type if not explicitly given', () => {
		// An explicit inputType overrides what would be inferred from the stream class.
		const resource = createAudioResource(new opus.Encoder(), { inputType: StreamType.Arbitrary });
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, NO_CONSTRAINT);
		expect(resource.volume).toBeUndefined();
	});
	test('Infers from opus.Encoder', () => {
		const resource = createAudioResource(new opus.Encoder(), { inlineVolume: true });
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Opus, VOLUME_CONSTRAINT);
		expect(resource.volume).toBeInstanceOf(VolumeTransformer);
		expect(resource.encoder).toBeInstanceOf(opus.Encoder);
	});
	test('Infers from opus.OggDemuxer', () => {
		const resource = createAudioResource(new opus.OggDemuxer());
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Opus, NO_CONSTRAINT);
		expect(resource.volume).toBeUndefined();
		expect(resource.encoder).toBeUndefined();
	});
	test('Infers from opus.WebmDemuxer', () => {
		const resource = createAudioResource(new opus.WebmDemuxer());
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Opus, NO_CONSTRAINT);
		expect(resource.volume).toBeUndefined();
	});
	test('Infers from opus.Decoder', () => {
		const resource = createAudioResource(new opus.Decoder());
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Raw, NO_CONSTRAINT);
		expect(resource.volume).toBeUndefined();
	});
	test('Infers from VolumeTransformer', () => {
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		const stream = new VolumeTransformer({} as any);
		const resource = createAudioResource(stream, { inlineVolume: true });
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Raw, NO_CONSTRAINT);
		// The user-provided transformer is reused rather than creating a new one.
		expect(resource.volume).toBe(stream);
	});
	test('Falls back to Arbitrary for unknown stream type', () => {
		const resource = createAudioResource(new PassThrough());
		expect(findPipeline).toHaveBeenCalledWith(StreamType.Arbitrary, NO_CONSTRAINT);
		expect(resource.volume).toBeUndefined();
	});
	test('Appends silence frames when ended', async () => {
		const stream = Readable.from(Buffer.from([1]));
		const resource = new AudioResource([], [stream], null, 5);
		await started(resource);
		expect(resource.readable).toBe(true);
		expect(resource.read()).toEqual(Buffer.from([1]));
		// Once the underlying stream ends, the configured 5 silence padding frames follow.
		for (let i = 0; i < 5; i++) {
			await wait();
			expect(resource.readable).toBe(true);
			expect(resource.read()).toBe(SILENCE_FRAME);
		}
		await wait();
		expect(resource.readable).toBe(false);
		expect(resource.read()).toBe(null);
	});
});

View File

@@ -0,0 +1,49 @@
import { Edge, findPipeline, StreamType, TransformerType } from '../TransformerGraph';
// A path constraint that accepts every candidate pipeline.
const noConstraint = () => true;
/**
 * Flattens a pipeline of edges into the ordered list of stream types it
 * passes through: each edge's source type, followed by the final edge's
 * destination type.
 *
 * @param pipeline - The pipeline of edges returned by findPipeline(...)
 */
function reducePath(pipeline: Edge[]) {
	const streams = pipeline.map((edge) => edge.from.type);
	streams.push(pipeline[pipeline.length - 1].to.type);
	return streams;
}
// Whether a single edge applies the inline-volume transformer.
const isVolume = (edge: Edge) => edge.type === TransformerType.InlineVolume;
// Whether any edge in a pipeline applies inline volume.
const containsVolume = (edges: Edge[]) => edges.some(isVolume);
describe('findPipeline (no constraints)', () => {
	// Every non-Opus input type should end up converted to Opus, with no
	// volume transformer inserted when none was requested.
	test.each([StreamType.Arbitrary, StreamType.OggOpus, StreamType.WebmOpus, StreamType.Raw])(
		'%s maps to opus with no inline volume',
		(type) => {
			const pipeline = findPipeline(type, noConstraint);
			const path = reducePath(pipeline);
			expect(path.length).toBeGreaterThanOrEqual(2);
			expect(path[0]).toBe(type);
			expect(path.pop()).toBe(StreamType.Opus);
			expect(pipeline.some(isVolume)).toBe(false);
		},
	);
	test('opus is unchanged', () => {
		// Opus input needs no transformation at all.
		expect(findPipeline(StreamType.Opus, noConstraint)).toHaveLength(0);
	});
});
describe('findPipeline (volume constraint)', () => {
	// With the volume constraint, every input type (including Opus) must pass
	// through an inline-volume edge on its way to Opus output.
	test.each(Object.values(StreamType))('%s maps to opus with inline volume', (type) => {
		const pipeline = findPipeline(type, containsVolume);
		const path = reducePath(pipeline);
		expect(path.length).toBeGreaterThanOrEqual(2);
		expect(path[0]).toBe(type);
		expect(path.pop()).toBe(StreamType.Opus);
		expect(pipeline.some(isVolume)).toBe(true);
	});
});

View File

@@ -0,0 +1,21 @@
// Barrel file: public API of the audio subsystem.
export {
	AudioPlayer,
	AudioPlayerStatus,
	AudioPlayerState,
	NoSubscriberBehavior,
	createAudioPlayer,
	AudioPlayerBufferingState,
	AudioPlayerIdleState,
	AudioPlayerPausedState,
	AudioPlayerPlayingState,
	CreateAudioPlayerOptions,
	AudioPlayerEvents,
} from './AudioPlayer';
export { AudioPlayerError } from './AudioPlayerError';
export { AudioResource, CreateAudioResourceOptions, createAudioResource } from './AudioResource';
export { PlayerSubscription } from './PlayerSubscription';
export { StreamType } from './TransformerGraph';

View File

@@ -0,0 +1,22 @@
// Root entry point: re-exports the library's public API.
export * from './joinVoiceChannel';
export * from './audio';
export * from './util';
export * from './receive';
export {
	VoiceConnection,
	VoiceConnectionState,
	VoiceConnectionStatus,
	VoiceConnectionConnectingState,
	VoiceConnectionDestroyedState,
	VoiceConnectionDisconnectedState,
	VoiceConnectionDisconnectedBaseState,
	VoiceConnectionDisconnectedOtherState,
	VoiceConnectionDisconnectedWebSocketState,
	VoiceConnectionDisconnectReason,
	VoiceConnectionReadyState,
	VoiceConnectionSignallingState,
	VoiceConnectionEvents,
} from './VoiceConnection';
export { JoinConfig, getVoiceConnection, getVoiceConnections, getGroups } from './DataStore';

View File

@@ -0,0 +1,66 @@
import { createVoiceConnection } from './VoiceConnection';
import type { JoinConfig } from './DataStore';
import type { DiscordGatewayAdapterCreator } from './util/adapter';
/**
 * The options that can be given when creating a voice connection.
 */
export interface CreateVoiceConnectionOptions {
	/**
	 * If true, debug messages will be enabled for the voice connection and its
	 * related components. Defaults to false.
	 */
	debug?: boolean;
	/**
	 * Creates the gateway adapter that the voice connection uses to exchange
	 * payloads with the main Discord gateway (see DiscordGatewayAdapterCreator).
	 */
	adapterCreator: DiscordGatewayAdapterCreator;
}
/**
 * The options that can be given when joining a voice channel.
 */
export interface JoinVoiceChannelOptions {
	/**
	 * The id of the Discord voice channel to join.
	 */
	channelId: string;
	/**
	 * The id of the guild that the voice channel belongs to.
	 */
	guildId: string;
	/**
	 * Whether to join the channel deafened (defaults to true)
	 */
	selfDeaf?: boolean;
	/**
	 * Whether to join the channel muted (defaults to true)
	 */
	selfMute?: boolean;
	/**
	 * An optional group identifier for the voice connection
	 * (defaults to 'default' — see joinVoiceChannel).
	 */
	group?: string;
}
/**
 * Creates a VoiceConnection to a Discord voice channel.
 *
 * @param options - the options for joining the voice channel
 */
export function joinVoiceChannel(options: JoinVoiceChannelOptions & CreateVoiceConnectionOptions) {
	// Apply defaults (deafened, unmuted, 'default' group); explicit options win.
	const joinConfig: JoinConfig = {
		selfDeaf: true,
		selfMute: false,
		group: 'default',
		...options,
	};
	return createVoiceConnection(joinConfig, {
		adapterCreator: options.adapterCreator,
		debug: options.debug,
	});
}

View File

@@ -0,0 +1,594 @@
import { VoiceOpcodes } from 'discord-api-types/voice/v4';
import { VoiceUDPSocket } from './VoiceUDPSocket';
import { VoiceWebSocket } from './VoiceWebSocket';
import * as secretbox from '../util/Secretbox';
import { Awaited, noop } from '../util/util';
import type { CloseEvent } from 'ws';
import { TypedEmitter } from 'tiny-typed-emitter';
// The number of audio channels required by Discord
const CHANNELS = 2;
// Amount the RTP timestamp advances per dispatched audio packet (see playAudioPacket).
const TIMESTAMP_INC = (48000 / 100) * CHANNELS;
// Largest value an unsigned 32-bit nonce counter can hold before it must wrap.
const MAX_NONCE_SIZE = 2 ** 32 - 1;
// The encryption modes this client is able to select.
export const SUPPORTED_ENCRYPTION_MODES = ['xsalsa20_poly1305_lite', 'xsalsa20_poly1305_suffix', 'xsalsa20_poly1305'];
/**
 * The different statuses that a networking instance can hold. The order
 * of the states between OpeningWs and Ready is chronological (first the
 * instance enters OpeningWs, then it enters Identifying etc.)
 */
export enum NetworkingStatusCode {
	OpeningWs,
	Identifying,
	UdpHandshaking,
	SelectingProtocol,
	Ready,
	Resuming,
	Closed,
}
/**
 * The initial Networking state. Instances will be in this state when a WebSocket connection to a Discord
 * voice gateway is being opened.
 */
export interface NetworkingOpeningWsState {
	code: NetworkingStatusCode.OpeningWs;
	ws: VoiceWebSocket;
	connectionOptions: ConnectionOptions;
}
/**
 * The state that a Networking instance will be in when it is attempting to authorize itself.
 */
export interface NetworkingIdentifyingState {
	code: NetworkingStatusCode.Identifying;
	ws: VoiceWebSocket;
	connectionOptions: ConnectionOptions;
}
/**
 * The state that a Networking instance will be in when opening a UDP connection to the IP and port provided
 * by Discord, as well as performing IP discovery.
 */
export interface NetworkingUdpHandshakingState {
	code: NetworkingStatusCode.UdpHandshaking;
	ws: VoiceWebSocket;
	udp: VoiceUDPSocket;
	connectionOptions: ConnectionOptions;
	// Only the ssrc is known at this point; the rest of ConnectionData arrives later.
	connectionData: Pick<ConnectionData, 'ssrc'>;
}
/**
 * The state that a Networking instance will be in when selecting an encryption protocol for audio packets.
 */
export interface NetworkingSelectingProtocolState {
	code: NetworkingStatusCode.SelectingProtocol;
	ws: VoiceWebSocket;
	udp: VoiceUDPSocket;
	connectionOptions: ConnectionOptions;
	connectionData: Pick<ConnectionData, 'ssrc'>;
}
/**
 * The state that a Networking instance will be in when it has a fully established connection to a Discord
 * voice server.
 */
export interface NetworkingReadyState {
	code: NetworkingStatusCode.Ready;
	ws: VoiceWebSocket;
	udp: VoiceUDPSocket;
	connectionOptions: ConnectionOptions;
	connectionData: ConnectionData;
	// An audio packet created by prepareAudioPacket(), awaiting dispatchAudio().
	preparedPacket?: Buffer;
}
/**
 * The state that a Networking instance will be in when its connection has been dropped unexpectedly, and it
 * is attempting to resume an existing session.
 */
export interface NetworkingResumingState {
	code: NetworkingStatusCode.Resuming;
	ws: VoiceWebSocket;
	udp: VoiceUDPSocket;
	connectionOptions: ConnectionOptions;
	connectionData: ConnectionData;
	// An audio packet created by prepareAudioPacket(), awaiting dispatchAudio().
	preparedPacket?: Buffer;
}
/**
 * The state that a Networking instance will be in when it has been destroyed. It cannot be recovered from this
 * state.
 */
export interface NetworkingClosedState {
	code: NetworkingStatusCode.Closed;
}
/**
 * The various states that a networking instance can be in.
 */
export type NetworkingState =
	| NetworkingOpeningWsState
	| NetworkingIdentifyingState
	| NetworkingUdpHandshakingState
	| NetworkingSelectingProtocolState
	| NetworkingReadyState
	| NetworkingResumingState
	| NetworkingClosedState;
/**
 * Details required to connect to the Discord voice gateway. These details
 * are first received on the main bot gateway, in the form of VOICE_SERVER_UPDATE
 * and VOICE_STATE_UPDATE packets.
 */
interface ConnectionOptions {
	serverId: string;
	userId: string;
	sessionId: string;
	token: string;
	endpoint: string;
}
/**
 * Information about the current connection, e.g. which encryption mode is to be used on
 * the connection, timing information for playback of streams.
 */
export interface ConnectionData {
	ssrc: number;
	encryptionMode: string;
	secretKey: Uint8Array;
	// RTP sequence number; advanced per packet and wrapped at 2^16 in playAudioPacket.
	sequence: number;
	// RTP timestamp; advanced by TIMESTAMP_INC per packet and wrapped at 2^32.
	timestamp: number;
	packetsPlayed: number;
	nonce: number;
	nonceBuffer: Buffer;
	// Whether the client is currently shown as speaking (toggled via setSpeaking).
	speaking: boolean;
}
/**
 * An empty buffer that is reused in packet encryption by many different networking instances.
 */
const nonce = Buffer.alloc(24);
export interface NetworkingEvents {
	debug: (message: string) => Awaited<void>;
	error: (error: Error) => Awaited<void>;
	stateChange: (oldState: NetworkingState, newState: NetworkingState) => Awaited<void>;
	close: (code: number) => Awaited<void>;
}
/**
* Manages the networking required to maintain a voice connection and dispatch audio packets
*/
export class Networking extends TypedEmitter<NetworkingEvents> {
// The current state of this instance; always mutate via the `state` setter
// so cleanup of stale ws/udp handles runs.
private _state: NetworkingState;
/**
 * The debug logger function, if debugging is enabled.
 */
private readonly debug: null | ((message: string) => void);
/**
* Creates a new Networking instance.
*/
public constructor(options: ConnectionOptions, debug: boolean) {
	super();
	// Bind each handler once so the exact same function references can later
	// be removed from discarded ws/udp instances in the state setter.
	this.onWsOpen = this.onWsOpen.bind(this);
	this.onChildError = this.onChildError.bind(this);
	this.onWsPacket = this.onWsPacket.bind(this);
	this.onWsClose = this.onWsClose.bind(this);
	this.onWsDebug = this.onWsDebug.bind(this);
	this.onUdpDebug = this.onUdpDebug.bind(this);
	this.onUdpClose = this.onUdpClose.bind(this);
	this.debug = debug ? (message: string) => this.emit('debug', message) : null;
	// Immediately begin opening the WebSocket connection to the voice gateway.
	this._state = {
		code: NetworkingStatusCode.OpeningWs,
		ws: this.createWebSocket(options.endpoint),
		connectionOptions: options,
	};
}
/**
* Destroys the Networking instance, transitioning it into the Closed state.
*/
public destroy() {
	// Entering Closed runs the state setter's cleanup of any live ws/udp handles.
	this.state = { code: NetworkingStatusCode.Closed };
}
/**
* The current state of the networking instance.
*/
public get state(): NetworkingState {
	// Reads go straight to the backing field; writes must use the setter below.
	return this._state;
}
/**
* Sets a new state for the networking instance, performing clean-up operations where necessary.
*/
public set state(newState: NetworkingState) {
	// If the WebSocket instance is being replaced, detach every handler from
	// the old one and destroy it so it cannot keep this connection alive.
	const oldWs = Reflect.get(this._state, 'ws') as VoiceWebSocket | undefined;
	const newWs = Reflect.get(newState, 'ws') as VoiceWebSocket | undefined;
	if (oldWs && oldWs !== newWs) {
		// The old WebSocket is being freed - remove all handlers from it
		oldWs.off('debug', this.onWsDebug);
		oldWs.on('error', noop); // swallow any late errors from the discarded socket
		oldWs.off('error', this.onChildError);
		oldWs.off('open', this.onWsOpen);
		oldWs.off('packet', this.onWsPacket);
		oldWs.off('close', this.onWsClose);
		oldWs.destroy();
	}
	// Likewise for a replaced UDP socket.
	const oldUdp = Reflect.get(this._state, 'udp') as VoiceUDPSocket | undefined;
	const newUdp = Reflect.get(newState, 'udp') as VoiceUDPSocket | undefined;
	if (oldUdp && oldUdp !== newUdp) {
		oldUdp.on('error', noop);
		oldUdp.off('error', this.onChildError);
		oldUdp.off('close', this.onUdpClose);
		oldUdp.off('debug', this.onUdpDebug);
		oldUdp.destroy();
	}
	const oldState = this._state;
	this._state = newState;
	this.emit('stateChange', oldState, newState);
	/**
	 * Debug event for Networking.
	 *
	 * @event Networking#debug
	 * @type {string}
	 */
	this.debug?.(`state change:\nfrom ${stringifyState(oldState)}\nto ${stringifyState(newState)}`);
}
/**
 * Creates a new WebSocket to a Discord Voice gateway. Debug logging on the
 * socket is enabled when this instance was constructed with debugging on.
 *
 * @param endpoint - The endpoint to connect to
 */
private createWebSocket(endpoint: string) {
	const ws = new VoiceWebSocket(`wss://${endpoint}?v=4`, Boolean(this.debug));
	ws.on('error', this.onChildError);
	// 'open' and 'close' can each fire at most once per socket.
	ws.once('open', this.onWsOpen);
	ws.on('packet', this.onWsPacket);
	ws.once('close', this.onWsClose);
	ws.on('debug', this.onWsDebug);
	return ws;
}
/**
* Propagates errors from the children VoiceWebSocket and VoiceUDPSocket.
*
* @param error - The error that was emitted by a child
*/
private onChildError(error: Error) {
	// Re-emit on this instance so consumers only need to listen in one place.
	this.emit('error', error);
}
/**
* Called when the WebSocket opens. Depending on the state that the instance is in,
* it will either identify with a new session, or it will attempt to resume an existing session.
*/
private onWsOpen() {
	if (this.state.code === NetworkingStatusCode.OpeningWs) {
		// Fresh connection: identify to start a brand-new session.
		const packet = {
			op: VoiceOpcodes.Identify,
			d: {
				server_id: this.state.connectionOptions.serverId,
				user_id: this.state.connectionOptions.userId,
				session_id: this.state.connectionOptions.sessionId,
				token: this.state.connectionOptions.token,
			},
		};
		this.state.ws.sendPacket(packet);
		this.state = {
			...this.state,
			code: NetworkingStatusCode.Identifying,
		};
	} else if (this.state.code === NetworkingStatusCode.Resuming) {
		// Reconnected after an unexpected drop: try to resume the previous session.
		const packet = {
			op: VoiceOpcodes.Resume,
			d: {
				server_id: this.state.connectionOptions.serverId,
				session_id: this.state.connectionOptions.sessionId,
				token: this.state.connectionOptions.token,
			},
		};
		this.state.ws.sendPacket(packet);
	}
}
/**
* Called when the WebSocket closes. Based on the reason for closing (given by the code parameter),
* the instance will either attempt to resume, or enter the closed state and emit a 'close' event
* with the close code, allowing the user to decide whether or not they would like to reconnect.
*
* @param code - The close code
*/
private onWsClose({ code }: CloseEvent) {
	// Close code 4015 and all codes below 4000 are treated as resumable.
	const canResume = code === 4015 || code < 4000;
	if (canResume && this.state.code === NetworkingStatusCode.Ready) {
		this.state = {
			...this.state,
			code: NetworkingStatusCode.Resuming,
			ws: this.createWebSocket(this.state.connectionOptions.endpoint),
		};
	} else if (this.state.code !== NetworkingStatusCode.Closed) {
		// Not resumable: tear everything down and surface the close code to the user.
		this.destroy();
		this.emit('close', code);
	}
}
/**
* Called when the UDP socket has closed itself if it has stopped receiving replies from Discord.
*/
private onUdpClose() {
	// Only a fully-established connection attempts to resume; otherwise ignore.
	if (this.state.code !== NetworkingStatusCode.Ready) return;
	this.state = {
		...this.state,
		code: NetworkingStatusCode.Resuming,
		ws: this.createWebSocket(this.state.connectionOptions.endpoint),
	};
}
/**
* Called when a packet is received on the connection's WebSocket.
*
* @param packet - The received packet
*/
private onWsPacket(packet: any) {
	if (packet.op === VoiceOpcodes.Hello && this.state.code !== NetworkingStatusCode.Closed) {
		// Hello carries the heartbeat interval the WebSocket must keep to.
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		this.state.ws.setHeartbeatInterval(packet.d.heartbeat_interval);
	} else if (packet.op === VoiceOpcodes.Ready && this.state.code === NetworkingStatusCode.Identifying) {
		// Ready provides the UDP endpoint: open a socket and perform IP discovery.
		const { ip, port, ssrc, modes } = packet.d;
		const udp = new VoiceUDPSocket({ ip, port });
		udp.on('error', this.onChildError);
		udp.on('debug', this.onUdpDebug);
		udp.once('close', this.onUdpClose);
		udp
			// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
			.performIPDiscovery(ssrc)
			.then((localConfig) => {
				// The state may have moved on while discovery was in flight.
				if (this.state.code !== NetworkingStatusCode.UdpHandshaking) return;
				this.state.ws.sendPacket({
					op: VoiceOpcodes.SelectProtocol,
					d: {
						protocol: 'udp',
						data: {
							address: localConfig.ip,
							port: localConfig.port,
							// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
							mode: chooseEncryptionMode(modes),
						},
					},
				});
				this.state = {
					...this.state,
					code: NetworkingStatusCode.SelectingProtocol,
				};
			})
			.catch((error: Error) => this.emit('error', error));
		this.state = {
			...this.state,
			code: NetworkingStatusCode.UdpHandshaking,
			udp,
			connectionData: {
				ssrc,
			},
		};
	} else if (
		packet.op === VoiceOpcodes.SessionDescription &&
		this.state.code === NetworkingStatusCode.SelectingProtocol
	) {
		// Session description delivers the secret key: the connection is now Ready.
		const { mode: encryptionMode, secret_key: secretKey } = packet.d;
		this.state = {
			...this.state,
			code: NetworkingStatusCode.Ready,
			connectionData: {
				...this.state.connectionData,
				encryptionMode,
				// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
				secretKey: new Uint8Array(secretKey),
				// Randomize starting sequence/timestamp within their wire-format ranges.
				sequence: randomNBit(16),
				timestamp: randomNBit(32),
				nonce: 0,
				nonceBuffer: Buffer.alloc(24),
				speaking: false,
				packetsPlayed: 0,
			},
		};
	} else if (packet.op === VoiceOpcodes.Resumed && this.state.code === NetworkingStatusCode.Resuming) {
		this.state = {
			...this.state,
			code: NetworkingStatusCode.Ready,
		};
		// Speaking status must be re-sent after a resume.
		this.state.connectionData.speaking = false;
	}
}
/**
* Propagates debug messages from the child WebSocket.
*
* @param message - The emitted debug message
*/
private onWsDebug(message: string) {
	// Prefix so WebSocket messages are distinguishable in the combined debug stream.
	this.debug?.('[WS] ' + message);
}
/**
* Propagates debug messages from the child UDPSocket.
*
* @param message - The emitted debug message
*/
private onUdpDebug(message: string) {
	// Prefix so UDP messages are distinguishable in the combined debug stream.
	this.debug?.('[UDP] ' + message);
}
/**
* Prepares an Opus packet for playback. This includes attaching metadata to it and encrypting it.
* It will be stored within the instance, and can be played by dispatchAudio()
*
* @remarks
* Calling this method while there is already a prepared audio packet that has not yet been dispatched
* will overwrite the existing audio packet. This should be avoided.
*
* @param opusPacket - The Opus packet to encrypt
*
* @returns The audio packet that was prepared
*/
public prepareAudioPacket(opusPacket: Buffer) {
	const current = this.state;
	// Packets can only be encrypted once the connection is fully established.
	if (current.code !== NetworkingStatusCode.Ready) return undefined;
	const packet = this.createAudioPacket(opusPacket, current.connectionData);
	current.preparedPacket = packet;
	return packet;
}
/**
* Dispatches the audio packet previously prepared by prepareAudioPacket(opusPacket). The audio packet
* is consumed and cannot be dispatched again.
*/
public dispatchAudio() {
const state = this.state;
if (state.code !== NetworkingStatusCode.Ready) return false;
if (typeof state.preparedPacket !== 'undefined') {
this.playAudioPacket(state.preparedPacket);
state.preparedPacket = undefined;
return true;
}
return false;
}
/**
* Plays an audio packet, updating timing metadata used for playback.
*
* @param audioPacket - The audio packet to play
*/
private playAudioPacket(audioPacket: Buffer) {
const state = this.state;
if (state.code !== NetworkingStatusCode.Ready) return;
const { connectionData } = state;
connectionData.packetsPlayed++;
connectionData.sequence++;
connectionData.timestamp += TIMESTAMP_INC;
if (connectionData.sequence >= 2 ** 16) connectionData.sequence = 0;
if (connectionData.timestamp >= 2 ** 32) connectionData.timestamp = 0;
this.setSpeaking(true);
state.udp.send(audioPacket);
}
/**
* Sends a packet to the voice gateway indicating that the client has start/stopped sending
* audio.
*
* @param speaking - Whether or not the client should be shown as speaking
*/
public setSpeaking(speaking: boolean) {
const state = this.state;
if (state.code !== NetworkingStatusCode.Ready) return;
if (state.connectionData.speaking === speaking) return;
state.connectionData.speaking = speaking;
state.ws.sendPacket({
op: VoiceOpcodes.Speaking,
d: {
speaking: speaking ? 1 : 0,
delay: 0,
ssrc: state.connectionData.ssrc,
},
});
}
	/**
	 * Creates a new audio packet from an Opus packet. This involves encrypting the packet,
	 * then prepending a header that includes metadata.
	 *
	 * @param opusPacket - The Opus packet to prepare
	 * @param connectionData - The current connection data of the instance
	 */
	private createAudioPacket(opusPacket: Buffer, connectionData: ConnectionData) {
		// 12-byte RTP header.
		const packetBuffer = Buffer.alloc(12);
		packetBuffer[0] = 0x80; // Version/flags byte
		packetBuffer[1] = 0x78; // Payload type byte
		const { sequence, timestamp, ssrc } = connectionData;
		packetBuffer.writeUIntBE(sequence, 2, 2);
		packetBuffer.writeUIntBE(timestamp, 4, 4);
		packetBuffer.writeUIntBE(ssrc, 8, 4);
		// Copy the RTP header into the shared module-level nonce buffer — the default
		// encryption branch in encryptOpusPacket() uses it as the nonce.
		packetBuffer.copy(nonce, 0, 0, 12);
		return Buffer.concat([packetBuffer, ...this.encryptOpusPacket(opusPacket, connectionData)]);
	}
	/**
	 * Encrypts an Opus packet using the format agreed upon by the instance and Discord.
	 *
	 * @param opusPacket - The Opus packet to encrypt
	 * @param connectionData - The current connection data of the instance
	 *
	 * @returns The buffers appended after the RTP header: the ciphertext, optionally
	 * followed by the nonce data the receiver needs
	 */
	private encryptOpusPacket(opusPacket: Buffer, connectionData: ConnectionData) {
		const { secretKey, encryptionMode } = connectionData;
		if (encryptionMode === 'xsalsa20_poly1305_lite') {
			// Incrementing counter nonce, wrapped at MAX_NONCE_SIZE; the first 4 bytes
			// of the nonce buffer are appended to the packet for the receiver.
			connectionData.nonce++;
			if (connectionData.nonce > MAX_NONCE_SIZE) connectionData.nonce = 0;
			connectionData.nonceBuffer.writeUInt32BE(connectionData.nonce, 0);
			return [
				secretbox.methods.close(opusPacket, connectionData.nonceBuffer, secretKey),
				connectionData.nonceBuffer.slice(0, 4),
			];
		} else if (encryptionMode === 'xsalsa20_poly1305_suffix') {
			// Random 24-byte nonce, appended in full after the ciphertext.
			const random = secretbox.methods.random(24, connectionData.nonceBuffer);
			return [secretbox.methods.close(opusPacket, random, secretKey), random];
		}
		// Default mode: uses the module-level nonce buffer, which createAudioPacket()
		// filled with a copy of the RTP header. Nothing extra is appended.
		return [secretbox.methods.close(opusPacket, nonce, secretKey)];
	}
}
/**
 * Generates a random non-negative integer that fits within n bits.
 *
 * @param n - The number of bits
 *
 * @returns An integer in the range [0, 2^n)
 */
function randomNBit(n: number) {
	const upperBound = 2 ** n;
	return Math.floor(Math.random() * upperBound);
}
/**
 * Stringifies a NetworkingState for debug output. The `ws` and `udp` properties
 * are replaced with booleans indicating their presence, since the underlying
 * socket objects are not JSON-serializable.
 *
 * @param state - The state to stringify
 */
function stringifyState(state: NetworkingState) {
	const summary = {
		...state,
		ws: 'ws' in state,
		udp: 'udp' in state,
	};
	return JSON.stringify(summary);
}
/**
 * Chooses an encryption mode from a list of given options. Chooses the most preferred option,
 * where preference is the order of the given options.
 *
 * @param options - The available encryption options
 *
 * @throws When none of the options are supported by this library
 */
function chooseEncryptionMode(options: string[]): string {
	for (const mode of options) {
		if (SUPPORTED_ENCRYPTION_MODES.includes(mode)) {
			return mode;
		}
	}
	throw new Error(`No compatible encryption modes. Available include: ${options.join(', ')}`);
}

View File

@@ -0,0 +1,212 @@
import { createSocket, Socket } from 'node:dgram';
import { isIPv4 } from 'node:net';
import { TypedEmitter } from 'tiny-typed-emitter';
import type { Awaited } from '../util/util';
/**
 * Stores an IP address and port. Used to store socket details for the local client as well as
 * for Discord.
 */
export interface SocketConfig {
	ip: string;
	port: number;
}
/**
 * A keep alive datagram that has been sent but not yet acknowledged.
 */
interface KeepAlive {
	// The counter value that was written into the datagram.
	value: number;
	// The time (milliseconds since UNIX epoch) at which the datagram was sent.
	timestamp: number;
}
/**
 * The events emitted by a VoiceUDPSocket.
 */
export interface VoiceUDPSocketEvents {
	error: (error: Error) => Awaited<void>;
	close: () => Awaited<void>;
	debug: (message: string) => Awaited<void>;
	message: (message: Buffer) => Awaited<void>;
}
/**
 * The interval in milliseconds at which keep alive datagrams are sent.
 */
const KEEP_ALIVE_INTERVAL = 5e3;
/**
 * The maximum number of keep alive packets which can be missed.
 */
const KEEP_ALIVE_LIMIT = 12;
/**
 * The maximum value of the keep alive counter.
 */
const MAX_COUNTER_VALUE = 2 ** 32 - 1;
/**
 * Manages the UDP networking for a voice connection.
 */
export class VoiceUDPSocket extends TypedEmitter<VoiceUDPSocketEvents> {
	/**
	 * The underlying network Socket for the VoiceUDPSocket.
	 */
	private readonly socket: Socket;
	/**
	 * The socket details for Discord (remote)
	 */
	private readonly remote: SocketConfig;
	/**
	 * A list of keep alives that are waiting to be acknowledged.
	 */
	private readonly keepAlives: KeepAlive[];
	/**
	 * The counter used in the keep alive mechanism.
	 */
	private keepAliveCounter = 0;
	/**
	 * The buffer used to write the keep alive counter into.
	 */
	private readonly keepAliveBuffer: Buffer;
	/**
	 * The Node.js interval for the keep-alive mechanism.
	 */
	private readonly keepAliveInterval: NodeJS.Timeout;
	/**
	 * The time taken to receive a response to keep alive messages.
	 */
	public ping?: number;
	/**
	 * The debug logger function, if debugging is enabled.
	 */
	private readonly debug: null | ((message: string) => void);
	/**
	 * Creates a new VoiceUDPSocket.
	 *
	 * @param remote - Details of the remote socket
	 * @param debug - Whether debug logging is enabled
	 */
	public constructor(remote: SocketConfig, debug = false) {
		super();
		this.socket = createSocket('udp4');
		this.socket.on('error', (error: Error) => this.emit('error', error));
		this.socket.on('message', (buffer: Buffer) => this.onMessage(buffer));
		this.socket.on('close', () => this.emit('close'));
		this.remote = remote;
		this.keepAlives = [];
		this.keepAliveBuffer = Buffer.alloc(8);
		this.keepAliveInterval = setInterval(() => this.keepAlive(), KEEP_ALIVE_INTERVAL);
		// Send the first keep alive on the next tick instead of waiting a full interval.
		setImmediate(() => this.keepAlive());
		this.debug = debug ? (message: string) => this.emit('debug', message) : null;
	}
	/**
	 * Called when a message is received on the UDP socket.
	 *
	 * @param buffer The received buffer
	 */
	private onMessage(buffer: Buffer): void {
		// Handle keep alive message
		if (buffer.length === 8) {
			const counter = buffer.readUInt32LE(0);
			const index = this.keepAlives.findIndex(({ value }) => value === counter);
			// NOTE: 8-byte messages that do not match a pending keep alive are swallowed
			// (not propagated via the 'message' event).
			if (index === -1) return;
			this.ping = Date.now() - this.keepAlives[index].timestamp;
			// Delete all keep alives that are older than the acknowledged one
			// (splice(0, index) retains the entry at `index` itself).
			this.keepAlives.splice(0, index);
		}
		// Propagate the message
		this.emit('message', buffer);
	}
	/**
	 * Called at a regular interval to check whether we are still able to send datagrams to Discord.
	 */
	private keepAlive() {
		// Too many unacknowledged keep alives — assume the connection is dead.
		if (this.keepAlives.length >= KEEP_ALIVE_LIMIT) {
			this.debug?.('UDP socket has not received enough responses from Discord - closing socket');
			this.destroy();
			return;
		}
		this.keepAliveBuffer.writeUInt32LE(this.keepAliveCounter, 0);
		this.send(this.keepAliveBuffer);
		this.keepAlives.push({
			value: this.keepAliveCounter,
			timestamp: Date.now(),
		});
		this.keepAliveCounter++;
		// Wrap the counter so it always fits in the 32-bit field of the datagram.
		if (this.keepAliveCounter > MAX_COUNTER_VALUE) {
			this.keepAliveCounter = 0;
		}
	}
	/**
	 * Sends a buffer to Discord.
	 *
	 * @param buffer - The buffer to send
	 */
	public send(buffer: Buffer) {
		return this.socket.send(buffer, this.remote.port, this.remote.ip);
	}
	/**
	 * Closes the socket, the instance will not be able to be reused.
	 */
	public destroy() {
		try {
			this.socket.close();
		} catch {}
		clearInterval(this.keepAliveInterval);
	}
	/**
	 * Performs IP discovery to discover the local address and port to be used for the voice connection.
	 *
	 * @param ssrc - The SSRC received from Discord
	 */
	public performIPDiscovery(ssrc: number): Promise<SocketConfig> {
		return new Promise((resolve, reject) => {
			const listener = (message: Buffer) => {
				try {
					// A leading 0x2 marks a discovery response; ignore everything else.
					if (message.readUInt16BE(0) !== 2) return;
					const packet = parseLocalPacket(message);
					this.socket.off('message', listener);
					resolve(packet);
				} catch {}
			};
			this.socket.on('message', listener);
			this.socket.once('close', () => reject(new Error('Cannot perform IP discovery - socket closed')));
			// Discovery request: type (0x1) at offset 0, length (70) at offset 2, then our SSRC.
			const discoveryBuffer = Buffer.alloc(74);
			discoveryBuffer.writeUInt16BE(1, 0);
			discoveryBuffer.writeUInt16BE(70, 2);
			discoveryBuffer.writeUInt32BE(ssrc, 4);
			this.send(discoveryBuffer);
		});
	}
}
/**
 * Parses the response from Discord to aid with local IP discovery.
 *
 * @param message - The received message
 *
 * @throws When the embedded address is not a valid IPv4 address
 */
export function parseLocalPacket(message: Buffer): SocketConfig {
	const packet = Buffer.from(message);
	// The IP address is a null-terminated string starting at offset 8.
	const ip = packet.subarray(8, packet.indexOf(0, 8)).toString('utf8');
	if (!isIPv4(ip)) {
		throw new Error('Malformed IP address');
	}
	// The port occupies the final two bytes as an unsigned big-endian integer.
	const port = packet.readUInt16BE(packet.length - 2);
	return { ip, port };
}

View File

@@ -0,0 +1,179 @@
import { VoiceOpcodes } from 'discord-api-types/voice/v4';
import WebSocket, { MessageEvent } from 'ws';
import { TypedEmitter } from 'tiny-typed-emitter';
import type { Awaited } from '../util/util';
/**
 * The events emitted by a VoiceWebSocket: lifecycle events from the underlying
 * WebSocket, debug messages, and parsed voice gateway packets.
 */
export interface VoiceWebSocketEvents {
	error: (error: Error) => Awaited<void>;
	open: (event: WebSocket.Event) => Awaited<void>;
	close: (event: WebSocket.CloseEvent) => Awaited<void>;
	debug: (message: string) => Awaited<void>;
	packet: (packet: any) => Awaited<void>;
}
/**
 * An extension of the WebSocket class to provide helper functionality when interacting
 * with the Discord Voice gateway.
 */
export class VoiceWebSocket extends TypedEmitter<VoiceWebSocketEvents> {
	/**
	 * The current heartbeat interval, if any.
	 */
	private heartbeatInterval?: NodeJS.Timeout;
	/**
	 * The time (milliseconds since UNIX epoch) that the last heartbeat acknowledgement packet was received.
	 * This is set to 0 if an acknowledgement packet hasn't been received yet.
	 */
	private lastHeartbeatAck: number;
	/**
	 * The time (milliseconds since UNIX epoch) that the last heartbeat was sent. This is set to 0 if a heartbeat
	 * hasn't been sent yet.
	 */
	private lastHeartbeatSend: number;
	/**
	 * The number of consecutively missed heartbeats.
	 */
	private missedHeartbeats = 0;
	/**
	 * The last recorded ping.
	 */
	public ping?: number;
	/**
	 * The debug logger function, if debugging is enabled.
	 */
	private readonly debug: null | ((message: string) => void);
	/**
	 * The underlying WebSocket of this wrapper.
	 */
	private readonly ws: WebSocket;
	/**
	 * Creates a new VoiceWebSocket.
	 *
	 * @param address - The address to connect to
	 * @param debug - Whether debug logging is enabled
	 */
	public constructor(address: string, debug: boolean) {
		super();
		this.ws = new WebSocket(address);
		this.ws.onmessage = (e) => this.onMessage(e);
		this.ws.onopen = (e) => this.emit('open', e);
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		this.ws.onerror = (e: Error | WebSocket.ErrorEvent) => this.emit('error', e instanceof Error ? e : e.error);
		this.ws.onclose = (e) => this.emit('close', e);
		this.lastHeartbeatAck = 0;
		this.lastHeartbeatSend = 0;
		this.debug = debug ? (message: string) => this.emit('debug', message) : null;
	}
	/**
	 * Destroys the VoiceWebSocket. The heartbeat interval is cleared, and the connection is closed.
	 */
	public destroy() {
		try {
			this.debug?.('destroyed');
			this.setHeartbeatInterval(-1);
			this.ws.close(1000);
		} catch (error) {
			const e = error as Error;
			this.emit('error', e);
		}
	}
	/**
	 * Handles message events on the WebSocket. Attempts to JSON parse the messages and emit them
	 * as packets.
	 *
	 * @param event - The message event
	 */
	public onMessage(event: MessageEvent) {
		if (typeof event.data !== 'string') return;
		this.debug?.(`<< ${event.data}`);
		let packet: any;
		try {
			packet = JSON.parse(event.data);
		} catch (error) {
			const e = error as Error;
			this.emit('error', e);
			return;
		}
		if (packet.op === VoiceOpcodes.HeartbeatAck) {
			// The last heartbeat was acknowledged — reset the miss counter and
			// compute the round-trip time.
			this.lastHeartbeatAck = Date.now();
			this.missedHeartbeats = 0;
			this.ping = this.lastHeartbeatAck - this.lastHeartbeatSend;
		}
		this.emit('packet', packet);
	}
	/**
	 * Sends a JSON-stringifiable packet over the WebSocket.
	 *
	 * @param packet - The packet to send
	 */
	public sendPacket(packet: any) {
		try {
			const stringified = JSON.stringify(packet);
			this.debug?.(`>> ${stringified}`);
			return this.ws.send(stringified);
		} catch (error) {
			const e = error as Error;
			this.emit('error', e);
		}
	}
	/**
	 * Sends a heartbeat over the WebSocket. The heartbeat is counted as missed
	 * until a matching acknowledgement is received in onMessage().
	 */
	private sendHeartbeat() {
		this.lastHeartbeatSend = Date.now();
		this.missedHeartbeats++;
		const nonce = this.lastHeartbeatSend;
		return this.sendPacket({
			op: VoiceOpcodes.Heartbeat,
			d: nonce,
		});
	}
	/**
	 * Sets/clears an interval to send heartbeats over the WebSocket.
	 *
	 * @param ms - The interval in milliseconds. If negative, the interval will be unset
	 */
	public setHeartbeatInterval(ms: number) {
		if (typeof this.heartbeatInterval !== 'undefined') clearInterval(this.heartbeatInterval);
		if (ms > 0) {
			this.heartbeatInterval = setInterval(() => {
				if (this.lastHeartbeatSend !== 0 && this.missedHeartbeats >= 3) {
					// Missed too many heartbeats - disconnect
					this.ws.close();
					this.setHeartbeatInterval(-1);
				}
				this.sendHeartbeat();
			}, ms);
		}
	}
}

View File

@@ -0,0 +1,170 @@
/* eslint-disable @typescript-eslint/no-empty-function */
/* eslint-disable @typescript-eslint/no-unused-vars */
import { createSocket as _createSocket } from 'node:dgram';
import EventEmitter, { once } from 'node:events';
import { VoiceUDPSocket } from '../VoiceUDPSocket';
// Mock the datagram module so no real UDP sockets are created, and use fake
// timers so the keep-alive interval can be advanced manually in tests.
jest.mock('node:dgram');
jest.useFakeTimers();
const createSocket = _createSocket as unknown as jest.Mock<typeof _createSocket>;
// Reset the mock between tests so call counts and implementations do not leak.
beforeEach(() => {
	createSocket.mockReset();
});
/**
 * A minimal stand-in for a dgram Socket: send is a no-op (tests replace it per
 * case), and close mirrors the real socket by emitting 'close'.
 */
class FakeSocket extends EventEmitter {
	public send(_buffer: Buffer, _port: number, _address: string) {}
	public close() {
		this.emit('close');
	}
}
// A well-formed IP discovery response: ip = 91.90.123.93, port = 54148
// eslint-disable-next-line prettier/prettier
const VALID_RESPONSE = Buffer.from([
	0x0, 0x2, 0x0, 0x46, 0x0, 0x4, 0xeb, 0x23, 0x39, 0x31, 0x2e, 0x39, 0x30, 0x2e, 0x31, 0x32, 0x33, 0x2e, 0x39, 0x33,
	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd3, 0x84,
]);
// Yields to pending immediates and advances Jest's fake timers by one timer,
// allowing scheduled keep-alive work to run.
function wait() {
	return new Promise((resolve) => {
		setImmediate(resolve);
		jest.advanceTimersToNextTimer();
	});
}
// Exercises IP discovery and the keep-alive lifecycle of VoiceUDPSocket using
// a fake dgram socket and fake timers.
describe('VoiceUDPSocket#performIPDiscovery', () => {
	let socket: VoiceUDPSocket;
	afterEach(() => {
		socket.destroy();
	});
	/*
		Ensures that the UDP socket sends data and parses the response correctly
	*/
	test('Resolves and cleans up with a successful flow', async () => {
		const fake = new FakeSocket();
		fake.send = jest.fn().mockImplementation((buffer: Buffer, port: number, address: string) => {
			fake.emit('message', VALID_RESPONSE);
		});
		createSocket.mockImplementation((type) => fake as any);
		socket = new VoiceUDPSocket({ ip: '1.2.3.4', port: 25565 });
		expect(createSocket).toHaveBeenCalledWith('udp4');
		// One listener: the socket's own onMessage handler.
		expect(fake.listenerCount('message')).toBe(1);
		await expect(socket.performIPDiscovery(1234)).resolves.toEqual({
			ip: '91.90.123.93',
			port: 54148,
		});
		// Ensure clean up occurs
		expect(fake.listenerCount('message')).toBe(1);
	});
	/*
		In the case where an unrelated message is received before the IP discovery buffer,
		the UDP socket should wait indefinitely until the correct buffer arrives.
	*/
	test('Waits for a valid response in an unexpected flow', async () => {
		const fake = new FakeSocket();
		const fakeResponse = Buffer.from([1, 2, 3, 4, 5]);
		fake.send = jest.fn().mockImplementation(async (buffer: Buffer, port: number, address: string) => {
			fake.emit('message', fakeResponse);
			await wait();
			fake.emit('message', VALID_RESPONSE);
		});
		createSocket.mockImplementation(() => fake as any);
		socket = new VoiceUDPSocket({ ip: '1.2.3.4', port: 25565 });
		expect(createSocket).toHaveBeenCalledWith('udp4');
		expect(fake.listenerCount('message')).toBe(1);
		await expect(socket.performIPDiscovery(1234)).resolves.toEqual({
			ip: '91.90.123.93',
			port: 54148,
		});
		// Ensure clean up occurs
		expect(fake.listenerCount('message')).toBe(1);
	});
	test('Rejects if socket closes before IP discovery can be completed', async () => {
		const fake = new FakeSocket();
		fake.send = jest.fn().mockImplementation(async (buffer: Buffer, port: number, address: string) => {
			await wait();
			fake.close();
		});
		createSocket.mockImplementation(() => fake as any);
		socket = new VoiceUDPSocket({ ip: '1.2.3.4', port: 25565 });
		expect(createSocket).toHaveBeenCalledWith('udp4');
		await expect(socket.performIPDiscovery(1234)).rejects.toThrowError();
	});
	// Keep alives are acknowledged by echoing them back, so the socket must not close.
	test('Stays alive when messages are echoed back', async () => {
		const fake = new FakeSocket();
		fake.send = jest.fn().mockImplementation(async (buffer: Buffer) => {
			await wait();
			fake.emit('message', buffer);
		});
		createSocket.mockImplementation(() => fake as any);
		socket = new VoiceUDPSocket({ ip: '1.2.3.4', port: 25565 });
		let closed = false;
		// @ts-expect-error
		socket.on('close', () => (closed = true));
		for (let i = 0; i < 30; i++) {
			jest.advanceTimersToNextTimer();
			await wait();
		}
		expect(closed).toBe(false);
	});
	// With no acknowledgements at all, the keep-alive limit should close the socket.
	test('Emits an error when no response received to keep alive messages', async () => {
		const fake = new FakeSocket();
		fake.send = jest.fn();
		createSocket.mockImplementation(() => fake as any);
		socket = new VoiceUDPSocket({ ip: '1.2.3.4', port: 25565 });
		let closed = false;
		// @ts-expect-error
		socket.on('close', () => (closed = true));
		for (let i = 0; i < 15; i++) {
			jest.advanceTimersToNextTimer();
			await wait();
		}
		expect(closed).toBe(true);
	});
	// Acknowledgements that resume before the limit is hit should keep the socket open.
	test('Recovers from intermittent responses', async () => {
		const fake = new FakeSocket();
		const fakeSend = jest.fn();
		fake.send = fakeSend;
		createSocket.mockImplementation(() => fake as any);
		socket = new VoiceUDPSocket({ ip: '1.2.3.4', port: 25565 });
		let closed = false;
		// @ts-expect-error
		socket.on('close', () => (closed = true));
		for (let i = 0; i < 10; i++) {
			jest.advanceTimersToNextTimer();
			await wait();
		}
		fakeSend.mockImplementation(async (buffer: Buffer) => {
			await wait();
			fake.emit('message', buffer);
		});
		expect(closed).toBe(false);
		for (let i = 0; i < 30; i++) {
			jest.advanceTimersToNextTimer();
			await wait();
		}
		expect(closed).toBe(false);
	});
});

View File

@@ -0,0 +1,121 @@
import { VoiceOpcodes } from 'discord-api-types/voice/v4';
import EventEmitter, { once } from 'node:events';
import WS from 'jest-websocket-mock';
import { VoiceWebSocket } from '../VoiceWebSocket';
// Remove any mock WebSocket servers left over from previous tests.
beforeEach(() => {
	WS.clean();
});
/**
 * Resolves with the first value emitted for the given event. Unlike
 * events.once(), this never rejects when the emitter emits 'error'.
 */
function onceIgnoreError<T extends EventEmitter>(target: T, event: string) {
	return new Promise((resolve) => target.on(event, resolve));
}
/**
 * Resolves with the first value emitted for the given event, or rejects with
 * 'Time up' once `after` milliseconds elapse. The timeout is not cancelled on
 * success; the late rejection is simply ignored by the settled promise.
 */
function onceOrThrow<T extends EventEmitter>(target: T, event: string, after: number) {
	return new Promise((resolve, reject) => {
		target.on(event, resolve);
		setTimeout(() => {
			reject(new Error('Time up'));
		}, after);
	});
}
// Verifies JSON parsing of inbound gateway messages using a mock WebSocket server.
describe('VoiceWebSocket: packet parsing', () => {
	test('Parses and emits packets', async () => {
		const endpoint = 'ws://localhost:1234';
		const server = new WS(endpoint, { jsonProtocol: true });
		const ws = new VoiceWebSocket(endpoint, false);
		await server.connected;
		const dummy = { value: 3 };
		const rcv = once(ws, 'packet');
		server.send(dummy);
		await expect(rcv).resolves.toEqual([dummy]);
	});
	// Invalid JSON should emit 'error' but leave the socket usable for later packets.
	test('Recovers from invalid packets', async () => {
		const endpoint = 'ws://localhost:1234';
		const server = new WS(endpoint);
		const ws = new VoiceWebSocket(endpoint, false);
		await server.connected;
		let rcv = once(ws, 'packet');
		server.send('asdf');
		await expect(rcv).rejects.toThrowError();
		const dummy = { op: 1234 };
		rcv = once(ws, 'packet');
		server.send(JSON.stringify(dummy));
		await expect(rcv).resolves.toEqual([dummy]);
	});
});
// Verifies that lifecycle events of the underlying WebSocket are re-emitted.
describe('VoiceWebSocket: event propagation', () => {
	test('open', async () => {
		const endpoint = 'ws://localhost:1234';
		const server = new WS(endpoint);
		const ws = new VoiceWebSocket(endpoint, false);
		const rcv = once(ws, 'open');
		await server.connected;
		await expect(rcv).resolves.toBeTruthy();
	});
	test('close (clean)', async () => {
		const endpoint = 'ws://localhost:1234';
		const server = new WS(endpoint);
		const ws = new VoiceWebSocket(endpoint, false);
		await server.connected;
		const rcv = once(ws, 'close');
		server.close();
		await expect(rcv).resolves.toBeTruthy();
	});
	test('close (error)', async () => {
		const endpoint = 'ws://localhost:1234';
		const server = new WS(endpoint);
		const ws = new VoiceWebSocket(endpoint, false);
		await server.connected;
		const rcvError = once(ws, 'error');
		const rcvClose = onceIgnoreError(ws, 'close');
		server.error();
		await expect(rcvError).resolves.toBeTruthy();
		await expect(rcvClose).resolves.toBeTruthy();
	});
});
// Verifies the heartbeat send/acknowledge cycle and the missed-heartbeat disconnect.
describe('VoiceWebSocket: heartbeating', () => {
	test('Normal heartbeat flow', async () => {
		const endpoint = 'ws://localhost:1234';
		const server = new WS(endpoint, { jsonProtocol: true });
		const ws = new VoiceWebSocket(endpoint, false);
		await server.connected;
		const rcv = onceOrThrow(ws, 'close', 750);
		ws.setHeartbeatInterval(50);
		for (let i = 0; i < 10; i++) {
			const packet: any = await server.nextMessage;
			expect(packet).toMatchObject({
				op: VoiceOpcodes.Heartbeat,
			});
			server.send({
				op: VoiceOpcodes.HeartbeatAck,
				d: packet.d,
			});
			expect(ws.ping).toBeGreaterThanOrEqual(0);
		}
		ws.setHeartbeatInterval(-1);
		// The socket must NOT have closed during a healthy heartbeat cycle.
		await expect(rcv).rejects.toThrowError();
	});
	test('Closes when no ack is received', async () => {
		const endpoint = 'ws://localhost:1234';
		const server = new WS(endpoint, { jsonProtocol: true });
		const ws = new VoiceWebSocket(endpoint, false);
		// eslint-disable-next-line @typescript-eslint/no-empty-function
		ws.on('error', () => {});
		await server.connected;
		const rcv = onceIgnoreError(ws, 'close');
		ws.setHeartbeatInterval(50);
		await expect(rcv).resolves.toBeTruthy();
		expect(ws.ping).toBe(undefined);
		// Three heartbeats are sent before the miss limit triggers the disconnect.
		expect(server.messages.length).toBe(3);
	});
});

View File

@@ -0,0 +1,3 @@
export * from './Networking';
export * from './VoiceUDPSocket';
export * from './VoiceWebSocket';

View File

@@ -0,0 +1,89 @@
import { Readable, ReadableOptions } from 'node:stream';
import { SILENCE_FRAME } from '../audio/AudioPlayer';
/**
 * The different behaviors an audio receive stream can have for deciding when to end.
 */
export enum EndBehaviorType {
	/**
	 * The stream will only end when manually destroyed.
	 */
	Manual,
	/**
	 * The stream will end after a given time period of silence/no audio packets.
	 */
	AfterSilence,
	/**
	 * The stream will end after a given time period of no audio packets.
	 */
	AfterInactivity,
}
/**
 * Describes when an AudioReceiveStream should end: either manually, or after a
 * given duration (in milliseconds) of silence/inactivity.
 */
export type EndBehavior =
	| {
			behavior: EndBehaviorType.Manual;
	  }
	| {
			behavior: EndBehaviorType.AfterSilence | EndBehaviorType.AfterInactivity;
			duration: number;
	  };
export interface AudioReceiveStreamOptions extends ReadableOptions {
	end: EndBehavior;
}

/**
 * Builds the default options for a new audio receive stream: one that never
 * ends on its own (manual end behavior).
 */
export function createDefaultAudioReceiveStreamOptions(): AudioReceiveStreamOptions {
	return { end: { behavior: EndBehaviorType.Manual } };
}
/**
 * A readable stream of Opus packets received from a specific entity
 * in a Discord voice connection.
 */
export class AudioReceiveStream extends Readable {
	/**
	 * The end behavior of the receive stream.
	 */
	public readonly end: EndBehavior;
	// Timer that pushes null (EOF) once the configured duration elapses.
	// NOTE(review): this timeout is not cleared when the stream is destroyed
	// manually — confirm a late push(null) on a destroyed stream is acceptable.
	private endTimeout?: NodeJS.Timeout;
	public constructor({ end, ...options }: AudioReceiveStreamOptions) {
		super({
			...options,
			// Each chunk is a whole Opus packet, so the stream runs in object mode.
			objectMode: true,
		});
		this.end = end;
	}
	public override push(buffer: Buffer | null) {
		if (buffer) {
			if (
				// AfterInactivity: any packet (silence included) renews the timeout.
				this.end.behavior === EndBehaviorType.AfterInactivity ||
				// AfterSilence: only non-silence packets renew it, except that the very
				// first packet always starts the timer.
				(this.end.behavior === EndBehaviorType.AfterSilence &&
					(buffer.compare(SILENCE_FRAME) !== 0 || typeof this.endTimeout === 'undefined'))
			) {
				this.renewEndTimeout(this.end);
			}
		}
		return super.push(buffer);
	}
	// Restarts the countdown that ends the stream after `end.duration` milliseconds.
	private renewEndTimeout(end: EndBehavior & { duration: number }) {
		if (this.endTimeout) {
			clearTimeout(this.endTimeout);
		}
		this.endTimeout = setTimeout(() => this.push(null), end.duration);
	}
	// eslint-disable-next-line @typescript-eslint/no-empty-function
	public override _read() {}
}

View File

@@ -0,0 +1,112 @@
import { TypedEmitter } from 'tiny-typed-emitter';
import type { Awaited } from '../util/util';
/**
 * The known data for a user in a Discord voice connection.
 */
export interface VoiceUserData {
	/**
	 * The SSRC of the user's audio stream.
	 */
	audioSSRC: number;
	/**
	 * The SSRC of the user's video stream (if one exists)
	 * Cannot be 0. If undefined, the user has no video stream.
	 */
	videoSSRC?: number;
	/**
	 * The Discord user id of the user.
	 */
	userId: string;
}
/**
 * The events that an SSRCMap may emit.
 */
export interface SSRCMapEvents {
	// Emitted when an audio SSRC is seen for the first time.
	create: (newData: VoiceUserData) => Awaited<void>;
	// Emitted whenever data for an SSRC changes (also fired alongside 'create').
	update: (oldData: VoiceUserData | undefined, newData: VoiceUserData) => Awaited<void>;
	// Emitted when the data for an SSRC is removed from the map.
	delete: (deletedData: VoiceUserData) => Awaited<void>;
}
/**
 * Maps audio SSRCs to data of users in voice connections.
 */
export class SSRCMap extends TypedEmitter<SSRCMapEvents> {
	/**
	 * The underlying map of audio SSRC to user data.
	 */
	private readonly map: Map<number, VoiceUserData>;

	public constructor() {
		super();
		this.map = new Map();
	}

	/**
	 * Updates the map with new user data, merging over any existing entry.
	 * Emits 'create' for previously unseen SSRCs, and 'update' in all cases.
	 *
	 * @param data - The data to update with
	 */
	public update(data: VoiceUserData) {
		const previous = this.map.get(data.audioSSRC);
		const merged = { ...previous, ...data };
		this.map.set(data.audioSSRC, merged);
		if (!previous) this.emit('create', merged);
		this.emit('update', previous, merged);
	}

	/**
	 * Gets the stored voice data of a user.
	 *
	 * @param target - The target, either their user id or audio SSRC
	 *
	 * @returns The stored data, if any
	 */
	public get(target: number | string) {
		if (typeof target === 'string') {
			for (const data of this.map.values()) {
				if (data.userId === target) return data;
			}
			return undefined;
		}
		return this.map.get(target);
	}

	/**
	 * Deletes the stored voice data about a user, emitting 'delete' when an
	 * entry was actually removed.
	 *
	 * @param target - The target of the delete operation, either their audio SSRC or user id
	 *
	 * @returns The data that was deleted, if any
	 */
	public delete(target: number | string) {
		if (typeof target === 'string') {
			for (const [audioSSRC, data] of this.map.entries()) {
				if (data.userId === target) {
					this.map.delete(audioSSRC);
					this.emit('delete', data);
					return data;
				}
			}
			return undefined;
		}
		const existing = this.map.get(target);
		if (existing) {
			this.map.delete(target);
			this.emit('delete', existing);
		}
		return existing;
	}
}

View File

@@ -0,0 +1,62 @@
import { TypedEmitter } from 'tiny-typed-emitter';
import type { Awaited } from '../util/util';
/**
 * The events that a SpeakingMap can emit. The string payload is the Discord
 * user id of the user whose speaking state changed.
 */
export interface SpeakingMapEvents {
	/**
	 * Emitted when a user starts speaking.
	 */
	start: (userId: string) => Awaited<void>;
	/**
	 * Emitted when a user stops speaking.
	 */
	end: (userId: string) => Awaited<void>;
}
/**
 * Tracks the speaking states of users in a voice channel.
 */
export class SpeakingMap extends TypedEmitter<SpeakingMapEvents> {
	/**
	 * The delay after a packet is received from a user until they're marked as not speaking anymore.
	 */
	public static readonly DELAY = 100;

	/**
	 * The currently speaking users, mapped to the milliseconds since UNIX epoch at which they started speaking.
	 */
	public readonly users: Map<string, number>;

	/**
	 * Pending "stopped speaking" timeouts, keyed by user id.
	 */
	private readonly speakingTimeouts: Map<string, NodeJS.Timeout>;

	public constructor() {
		super();
		this.users = new Map();
		this.speakingTimeouts = new Map();
	}

	/**
	 * Registers that an audio packet was received from the user, emitting 'start'
	 * if they were not already considered to be speaking.
	 *
	 * @param userId - The id of the user the packet came from
	 */
	public onPacket(userId: string) {
		const existingTimeout = this.speakingTimeouts.get(userId);
		if (existingTimeout) {
			clearTimeout(existingTimeout);
		} else {
			this.users.set(userId, Date.now());
			this.emit('start', userId);
		}
		this.startTimeout(userId);
	}

	/**
	 * Schedules the 'end' emission for the user after SpeakingMap.DELAY with no
	 * further packets.
	 */
	private startTimeout(userId: string) {
		const timeout = setTimeout(() => {
			this.emit('end', userId);
			this.speakingTimeouts.delete(userId);
			this.users.delete(userId);
		}, SpeakingMap.DELAY);
		this.speakingTimeouts.set(userId, timeout);
	}
}

View File

@@ -0,0 +1,195 @@
import { VoiceOpcodes } from 'discord-api-types/voice/v4';
import type { ConnectionData } from '../networking/Networking';
import { methods } from '../util/Secretbox';
import type { VoiceConnection } from '../VoiceConnection';
import {
AudioReceiveStream,
AudioReceiveStreamOptions,
createDefaultAudioReceiveStreamOptions,
} from './AudioReceiveStream';
import { SpeakingMap } from './SpeakingMap';
import { SSRCMap } from './SSRCMap';
/**
* Attaches to a VoiceConnection, allowing you to receive audio packets from other
* users that are speaking.
*
* @beta
*/
export class VoiceReceiver {
/**
* The attached connection of this receiver.
*/
public readonly voiceConnection;
/**
* Maps SSRCs to Discord user ids.
*/
public readonly ssrcMap: SSRCMap;
/**
* The current audio subscriptions of this receiver.
*/
public readonly subscriptions: Map<string, AudioReceiveStream>;
/**
* The connection data of the receiver.
*
* @internal
*/
public connectionData: Partial<ConnectionData>;
/**
* The speaking map of the receiver.
*/
public readonly speaking: SpeakingMap;
public constructor(voiceConnection: VoiceConnection) {
this.voiceConnection = voiceConnection;
this.ssrcMap = new SSRCMap();
this.speaking = new SpeakingMap();
this.subscriptions = new Map();
this.connectionData = {};
this.onWsPacket = this.onWsPacket.bind(this);
this.onUdpMessage = this.onUdpMessage.bind(this);
}
/**
* Called when a packet is received on the attached connection's WebSocket.
*
* @param packet The received packet
*
* @internal
*/
public onWsPacket(packet: any) {
if (packet.op === VoiceOpcodes.ClientDisconnect && typeof packet.d?.user_id === 'string') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
this.ssrcMap.delete(packet.d.user_id);
} else if (
packet.op === VoiceOpcodes.Speaking &&
typeof packet.d?.user_id === 'string' &&
typeof packet.d?.ssrc === 'number'
) {
this.ssrcMap.update({ userId: packet.d.user_id, audioSSRC: packet.d.ssrc });
} else if (
packet.op === VoiceOpcodes.ClientConnect &&
typeof packet.d?.user_id === 'string' &&
typeof packet.d?.audio_ssrc === 'number'
) {
this.ssrcMap.update({
userId: packet.d.user_id,
audioSSRC: packet.d.audio_ssrc,
videoSSRC: packet.d.video_ssrc === 0 ? undefined : packet.d.video_ssrc,
});
}
}
private decrypt(buffer: Buffer, mode: string, nonce: Buffer, secretKey: Uint8Array) {
// Choose correct nonce depending on encryption
let end;
if (mode === 'xsalsa20_poly1305_lite') {
buffer.copy(nonce, 0, buffer.length - 4);
end = buffer.length - 4;
} else if (mode === 'xsalsa20_poly1305_suffix') {
buffer.copy(nonce, 0, buffer.length - 24);
end = buffer.length - 24;
} else {
buffer.copy(nonce, 0, 0, 12);
}
// Open packet
const decrypted = methods.open(buffer.slice(12, end), nonce, secretKey);
if (!decrypted) return;
return Buffer.from(decrypted);
}
/**
* Parses an audio packet, decrypting it to yield an Opus packet.
*
* @param buffer The buffer to parse
* @param mode The encryption mode
* @param nonce The nonce buffer used by the connection for encryption
* @param secretKey The secret key used by the connection for encryption
*
* @returns The parsed Opus packet
*/
private parsePacket(buffer: Buffer, mode: string, nonce: Buffer, secretKey: Uint8Array) {
let packet = this.decrypt(buffer, mode, nonce, secretKey);
if (!packet) return;
// Strip RTP Header Extensions (one-byte only)
if (packet[0] === 0xbe && packet[1] === 0xde && packet.length > 4) {
const headerExtensionLength = packet.readUInt16BE(2);
let offset = 4;
for (let i = 0; i < headerExtensionLength; i++) {
const byte = packet[offset];
offset++;
if (byte === 0) continue;
offset += 1 + (byte >> 4);
}
// Skip over undocumented Discord byte (if present)
const byte = packet.readUInt8(offset);
if (byte === 0x00 || byte === 0x02) offset++;
packet = packet.slice(offset);
}
return packet;
}
/**
 * Called when the UDP socket of the attached connection receives a message.
 *
 * @param msg The received message
 *
 * @internal
 */
public onUdpMessage(msg: Buffer) {
	// The SSRC occupies bytes 8-11 of the RTP header, so any packet shorter than
	// 12 bytes cannot be parsed. (The previous `<= 8` guard let 9-11 byte packets
	// through, where readUInt32BE(8) would throw a RangeError.)
	if (msg.length < 12) return;
	const ssrc = msg.readUInt32BE(8);
	const userData = this.ssrcMap.get(ssrc);
	if (!userData) return;
	this.speaking.onPacket(userData.userId);
	const stream = this.subscriptions.get(userData.userId);
	if (!stream) return;
	// Only attempt to parse once the connection's crypto handshake has completed.
	if (this.connectionData.encryptionMode && this.connectionData.nonceBuffer && this.connectionData.secretKey) {
		const packet = this.parsePacket(
			msg,
			this.connectionData.encryptionMode,
			this.connectionData.nonceBuffer,
			this.connectionData.secretKey,
		);
		if (packet) {
			stream.push(packet);
		} else {
			stream.destroy(new Error('Failed to parse packet'));
		}
	}
}
/**
 * Creates a subscription for the given user id.
 *
 * @param userId The id of the user to subscribe to
 * @param options Options customising when the receive stream ends
 *
 * @returns A readable stream of Opus packets received from the target
 */
public subscribe(userId: string, options?: Partial<AudioReceiveStreamOptions>) {
	// Reuse an existing live subscription rather than creating a duplicate stream.
	const existing = this.subscriptions.get(userId);
	if (existing) return existing;
	const stream = new AudioReceiveStream({
		...createDefaultAudioReceiveStreamOptions(),
		...options,
	});
	// Drop the subscription once the stream closes so a fresh one can be created later.
	stream.once('close', () => this.subscriptions.delete(userId));
	this.subscriptions.set(userId, stream);
	return stream;
}
}

View File

@@ -0,0 +1,72 @@
import { SILENCE_FRAME } from '../../audio/AudioPlayer';
import { AudioReceiveStream, EndBehaviorType } from '../AudioReceiveStream';
// A 16-byte stand-in for a real (non-silent) audio frame.
const DUMMY_BUFFER = Buffer.allocUnsafe(16);
/**
 * Returns a promise that resolves after the given number of milliseconds.
 *
 * @param ms The delay, in milliseconds
 */
function wait(ms: number) {
	return new Promise((resolve) => {
		setTimeout(resolve, ms);
	});
}
/**
 * Pushes a silence frame into the stream, waits for the given increment, and
 * asserts that the stream is still readable afterwards.
 *
 * @param stream The audio receive stream under test
 * @param increment How long to wait after pushing silence, in milliseconds
 */
async function stepSilence(stream: AudioReceiveStream, increment: number) {
	stream.push(SILENCE_FRAME);
	await wait(increment);
	expect(stream.readable).toBe(true);
}
describe('AudioReceiveStream', () => {
	test('Manual end behavior', async () => {
		// A manual stream should stay readable no matter how long it goes without data.
		const stream = new AudioReceiveStream({ end: { behavior: EndBehaviorType.Manual } });
		stream.push(DUMMY_BUFFER);
		expect(stream.readable).toBe(true);
		await wait(200);
		stream.push(DUMMY_BUFFER);
		expect(stream.readable).toBe(true);
	});
	// TODO: Fix this test
	// test('AfterSilence end behavior', async () => {
	// const duration = 100;
	// const increment = 20;
	// const stream = new AudioReceiveStream({ end: { behavior: EndBehaviorType.AfterSilence, duration: 100 } });
	// stream.resume();
	// for (let i = increment; i < duration / 2; i += increment) {
	// await stepSilence(stream, increment);
	// }
	// stream.push(DUMMY_BUFFER);
	// for (let i = increment; i < duration; i += increment) {
	// await stepSilence(stream, increment);
	// }
	// await wait(increment);
	// expect(stream.readableEnded).toBe(true);
	// });
	test('AfterInactivity end behavior', async () => {
		const duration = 100;
		const increment = 20;
		const stream = new AudioReceiveStream({ end: { behavior: EndBehaviorType.AfterInactivity, duration: 100 } });
		stream.resume();
		// Silence frames count as activity for AfterInactivity, so the stream stays open.
		for (let i = increment; i < duration / 2; i += increment) {
			await stepSilence(stream, increment);
		}
		stream.push(DUMMY_BUFFER);
		for (let i = increment; i < duration; i += increment) {
			await stepSilence(stream, increment);
		}
		// Only once the full inactivity window has elapsed should the stream end.
		await wait(increment);
		expect(stream.readableEnded).toBe(false);
		await wait(duration - increment);
		expect(stream.readableEnded).toBe(true);
	});
});

View File

@@ -0,0 +1,59 @@
import EventEmitter, { once } from 'node:events';
import { SSRCMap, VoiceUserData } from '../SSRCMap';
/**
 * Resolves with the first payload emitted for the given event, or rejects if the
 * event is not emitted within the time limit. Unlike the original implementation,
 * the event listener and the timer are both cleaned up on settlement, so the
 * emitter is not left with a dangling handler.
 *
 * @param target The emitter to listen on
 * @param event The event to wait for
 * @param after The time limit, in milliseconds
 */
function onceOrThrow<T extends EventEmitter>(target: T, event: string, after: number) {
	return new Promise((resolve, reject) => {
		const onEvent = (value: unknown) => {
			clearTimeout(timeout);
			resolve(value);
		};
		const timeout = setTimeout(() => {
			// Remove the listener so the emitter does not accumulate dead handlers.
			target.off(event, onEvent);
			reject(new Error('Time up'));
		}, after);
		target.once(event, onEvent);
	});
}
describe('SSRCMap', () => {
	test('update persists data and emits correctly', async () => {
		const fixture1: VoiceUserData = {
			audioSSRC: 1,
			userId: '123',
		};
		const fixture2: VoiceUserData = {
			...fixture1,
			videoSSRC: 2,
		};
		const map = new SSRCMap();
		// First update has no previous entry, so oldData should be undefined.
		process.nextTick(() => map.update(fixture1));
		let [oldData, newData] = await once(map, 'update');
		expect(oldData).toBeUndefined();
		expect(newData).toMatchObject(fixture1);
		expect(map.get(fixture1.audioSSRC)).toMatchObject(fixture1);
		// A second update for the same user merges data and reports the old entry.
		process.nextTick(() => map.update(fixture2));
		[oldData, newData] = await once(map, 'update');
		expect(oldData).toMatchObject(fixture1);
		expect(newData).toMatchObject(fixture2);
		expect(map.get(fixture1.userId)).toMatchObject(fixture2);
	});
	test('delete removes data and emits correctly', async () => {
		const fixture1: VoiceUserData = {
			audioSSRC: 1,
			userId: '123',
		};
		const map = new SSRCMap();
		// Deleting a missing entry must not emit 'delete'.
		map.delete(fixture1.audioSSRC);
		await expect(onceOrThrow(map, 'delete', 5)).rejects.toThrow();
		map.update(fixture1);
		// Deleting by audio SSRC emits the removed entry.
		process.nextTick(() => map.delete(fixture1.audioSSRC));
		await expect(once(map, 'delete')).resolves.toMatchObject([fixture1]);
		map.delete(fixture1.audioSSRC);
		await expect(onceOrThrow(map, 'delete', 5)).rejects.toThrow();
		map.update(fixture1);
		// Deleting by user id also emits the removed entry and clears the map.
		process.nextTick(() => map.delete(fixture1.userId));
		await expect(once(map, 'delete')).resolves.toMatchObject([fixture1]);
		expect(map.get(fixture1.audioSSRC)).toBeUndefined();
	});
});

View File

@@ -0,0 +1,32 @@
import { noop } from '../../util/util';
import { SpeakingMap } from '../SpeakingMap';
// Fake timers let us step SpeakingMap's internal silence timeout deterministically.
jest.useFakeTimers();
describe('SpeakingMap', () => {
	test('Emits start and end', () => {
		const speaking = new SpeakingMap();
		const userId = '123';
		const starts: string[] = [];
		const ends: string[] = [];
		// NOTE(review): the callback parameters shadow the outer `userId` — harmless
		// here since they carry the same value, but easy to misread.
		speaking.on('start', (userId) => void starts.push(userId));
		speaking.on('end', (userId) => void ends.push(userId));
		// Repeated packets inside the delay window must not emit additional 'start's.
		for (let i = 0; i < 10; i++) {
			speaking.onPacket(userId);
			setTimeout(noop, SpeakingMap.DELAY / 2);
			jest.advanceTimersToNextTimer();
			expect(starts).toEqual([userId]);
			expect(ends).toEqual([]);
		}
		// Once the delay fully elapses with no packets, 'end' fires.
		jest.advanceTimersToNextTimer();
		expect(ends).toEqual([userId]);
		// A packet after 'end' triggers a fresh 'start'.
		speaking.onPacket(userId);
		jest.advanceTimersToNextTimer();
		expect(starts).toEqual([userId, userId]);
	});
});

View File

@@ -0,0 +1,209 @@
/* eslint-disable @typescript-eslint/dot-notation */
import { VoiceReceiver } from '../VoiceReceiver';
import { VoiceConnection as _VoiceConnection, VoiceConnectionStatus } from '../../VoiceConnection';
import { RTP_PACKET_DESKTOP, RTP_PACKET_CHROME, RTP_PACKET_ANDROID } from './fixtures/rtp';
import { once } from 'node:events';
import { VoiceOpcodes } from 'discord-api-types/voice/v4';
import { methods } from '../../util/Secretbox';
// Mock out the heavy collaborators. open() is stubbed to return its input
// unchanged, so "decryption" is an identity operation throughout these tests.
jest.mock('../../VoiceConnection');
jest.mock('../SSRCMap');
const openSpy = jest.spyOn(methods, 'open');
openSpy.mockImplementation((buffer) => buffer);
const VoiceConnection = _VoiceConnection as unknown as jest.Mocked<typeof _VoiceConnection>;
/**
 * Returns a promise that resolves on the next tick of the event loop.
 */
function nextTick() {
	return new Promise<void>((resolve) => {
		process.nextTick(() => resolve());
	});
}
/**
 * Yields each integer from start to end, inclusive.
 *
 * @param start The first value to yield
 * @param end The final value to yield
 */
function* rangeIter(start: number, end: number) {
	let current = start;
	while (current <= end) {
		yield current;
		current += 1;
	}
}
function range(start: number, end: number) {
return Buffer.from([...rangeIter(start, end)]);
}
describe('VoiceReceiver', () => {
	let voiceConnection: _VoiceConnection;
	let receiver: VoiceReceiver;
	beforeEach(() => {
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		voiceConnection = new VoiceConnection({} as any, {} as any);
		voiceConnection.state = {
			status: VoiceConnectionStatus.Signalling,
		} as any;
		receiver = new VoiceReceiver(voiceConnection);
		// Dummy crypto state so packet parsing is exercised without a real handshake.
		receiver['connectionData'] = {
			encryptionMode: 'dummy',
			nonceBuffer: Buffer.alloc(0),
			secretKey: Buffer.alloc(0),
		};
	});
	test.each([
		['RTP Packet Desktop', RTP_PACKET_DESKTOP],
		['RTP Packet Chrome', RTP_PACKET_CHROME],
		['RTP Packet Android', RTP_PACKET_ANDROID],
	])('onUdpMessage: %s', async (testName, RTP_PACKET) => {
		// Stub decryption to the fixture's known plaintext; the subscribed stream
		// should then receive the extracted Opus frame.
		receiver['decrypt'] = jest.fn().mockImplementationOnce(() => RTP_PACKET.decrypted);
		const spy = jest.spyOn(receiver.ssrcMap, 'get');
		spy.mockImplementation(() => ({
			audioSSRC: RTP_PACKET.ssrc,
			userId: '123',
		}));
		const stream = receiver.subscribe('123');
		receiver['onUdpMessage'](RTP_PACKET.packet);
		await nextTick();
		expect(stream.read()).toEqual(RTP_PACKET.opusFrame);
	});
	test('onUdpMessage: <8 bytes packet', () => {
		// Undersized packets must be ignored, not crash the handler.
		expect(() => receiver['onUdpMessage'](Buffer.alloc(4))).not.toThrow();
	});
	test('onUdpMessage: destroys stream on decrypt failure', async () => {
		receiver['decrypt'] = jest.fn().mockImplementationOnce(() => null);
		const spy = jest.spyOn(receiver.ssrcMap, 'get');
		spy.mockImplementation(() => ({
			audioSSRC: RTP_PACKET_DESKTOP.ssrc,
			userId: '123',
		}));
		const stream = receiver.subscribe('123');
		const errorEvent = once(stream, 'error');
		receiver['onUdpMessage'](RTP_PACKET_DESKTOP.packet);
		await nextTick();
		// The stream errors and the subscription is removed from the map.
		await expect(errorEvent).resolves.toMatchObject([expect.any(Error)]);
		expect(receiver.subscriptions.size).toBe(0);
	});
	test('subscribe: only allows one subscribe stream per SSRC', () => {
		const spy = jest.spyOn(receiver.ssrcMap, 'get');
		spy.mockImplementation(() => ({
			audioSSRC: RTP_PACKET_DESKTOP.ssrc,
			userId: '123',
		}));
		const stream = receiver.subscribe('123');
		expect(receiver.subscribe('123')).toBe(stream);
	});
	describe('onWsPacket', () => {
		test('CLIENT_DISCONNECT packet', () => {
			const spy = jest.spyOn(receiver.ssrcMap, 'delete');
			receiver['onWsPacket']({
				op: VoiceOpcodes.ClientDisconnect,
				d: {
					user_id: '123abc',
				},
			});
			expect(spy).toHaveBeenCalledWith('123abc');
		});
		test('SPEAKING packet', () => {
			const spy = jest.spyOn(receiver.ssrcMap, 'update');
			receiver['onWsPacket']({
				op: VoiceOpcodes.Speaking,
				d: {
					ssrc: 123,
					user_id: '123abc',
					speaking: 1,
				},
			});
			expect(spy).toHaveBeenCalledWith({
				audioSSRC: 123,
				userId: '123abc',
			});
		});
		test('CLIENT_CONNECT packet', () => {
			const spy = jest.spyOn(receiver.ssrcMap, 'update');
			receiver['onWsPacket']({
				op: VoiceOpcodes.ClientConnect,
				d: {
					audio_ssrc: 123,
					video_ssrc: 43,
					user_id: '123abc',
				},
			});
			expect(spy).toHaveBeenCalledWith({
				audioSSRC: 123,
				videoSSRC: 43,
				userId: '123abc',
			});
			// A zero video SSRC means "no video" and should map to undefined.
			receiver['onWsPacket']({
				op: VoiceOpcodes.ClientConnect,
				d: {
					audio_ssrc: 123,
					video_ssrc: 0,
					user_id: '123abc',
				},
			});
			expect(spy).toHaveBeenCalledWith({
				audioSSRC: 123,
				videoSSRC: undefined,
				userId: '123abc',
			});
		});
	});
	describe('decrypt', () => {
		const secretKey = new Uint8Array([1, 2, 3, 4]);
		beforeEach(() => {
			openSpy.mockClear();
		});
		// The mocked open() is an identity function, so the "decrypted" output is
		// simply the ciphertext slice — letting us assert exact byte ranges below.
		test('decrypt: xsalsa20_poly1305_lite', () => {
			// Arrange
			const buffer = range(1, 32);
			const nonce = Buffer.alloc(4);
			// Act
			const decrypted = receiver['decrypt'](buffer, 'xsalsa20_poly1305_lite', nonce, secretKey);
			// Assert
			expect(nonce.equals(range(29, 32))).toBe(true);
			expect(decrypted.equals(range(13, 28))).toBe(true);
		});
		test('decrypt: xsalsa20_poly1305_suffix', () => {
			// Arrange
			const buffer = range(1, 64);
			const nonce = Buffer.alloc(24);
			// Act
			const decrypted = receiver['decrypt'](buffer, 'xsalsa20_poly1305_suffix', nonce, secretKey);
			// Assert
			expect(nonce.equals(range(41, 64))).toBe(true);
			expect(decrypted.equals(range(13, 40))).toBe(true);
		});
		test('decrypt: xsalsa20_poly1305', () => {
			// Arrange
			const buffer = range(1, 64);
			const nonce = Buffer.alloc(12);
			// Act
			const decrypted = receiver['decrypt'](buffer, 'xsalsa20_poly1305', nonce, secretKey);
			// Assert
			expect(nonce.equals(range(1, 12))).toBe(true);
			expect(decrypted.equals(range(13, 64))).toBe(true);
		});
	});
});

View File

@@ -0,0 +1,31 @@
// Captured RTP packet fixtures, one per client platform. Each fixture contains:
// - ssrc: the audio SSRC encoded in the packet header
// - packet: the raw encrypted RTP packet as received over UDP
// - decrypted: the expected plaintext after decryption (may still carry RTP
//   header extensions, e.g. the 0xbede one-byte extension marker)
// - opusFrame: the expected Opus frame once any extensions are stripped
export const RTP_PACKET_DESKTOP = {
	ssrc: 341124,
	packet: Buffer.from([
		0x90, 0x78, 0x27, 0xe9, 0xf7, 0xcb, 0xbc, 0xd1, 0x0, 0x5, 0x34, 0x84, 0x8a, 0xbb, 0xe2, 0x97, 0x21, 0x9f, 0x1f,
		0x67, 0xcd, 0x17, 0x91, 0x56, 0x43, 0xa0, 0x98, 0xfd, 0xa9, 0x25, 0x81, 0x63, 0x13, 0xb4, 0x1e, 0xae, 0x88, 0xe4,
		0x0, 0xed, 0x0, 0x0, 0x0,
	]),
	decrypted: Buffer.from([0xbe, 0xde, 0x0, 0x1, 0x10, 0xff, 0x90, 0x0, 0xf8, 0xff, 0xfe]),
	opusFrame: Buffer.from([0xf8, 0xff, 0xfe]),
};
export const RTP_PACKET_CHROME = {
	ssrc: 172360,
	packet: Buffer.from([
		0x80, 0x78, 0x46, 0xdf, 0x27, 0x59, 0x2a, 0xd7, 0x0, 0x2, 0xa1, 0x48, 0x42, 0x9e, 0x53, 0xec, 0x73, 0xc1, 0x71,
		0x22, 0x71, 0x60, 0x90, 0xff, 0x1b, 0x20, 0x47, 0x2c, 0xdc, 0x86, 0xc4, 0x9a, 0x0, 0x0, 0x0,
	]),
	// Chrome sends no header extensions, so decrypted == opusFrame.
	decrypted: Buffer.from([0xf8, 0xff, 0xfe]),
	opusFrame: Buffer.from([0xf8, 0xff, 0xfe]),
};
export const RTP_PACKET_ANDROID = {
	ssrc: 172596,
	packet: Buffer.from([
		0x90, 0x78, 0x39, 0xd0, 0xe0, 0x59, 0xf5, 0x47, 0x0, 0x2, 0xa2, 0x34, 0x12, 0x6d, 0x87, 0x56, 0x25, 0xc8, 0x3e,
		0x96, 0xc0, 0x71, 0x9a, 0x1, 0x83, 0xe, 0x1, 0x62, 0x91, 0x95, 0x1f, 0x76, 0x57, 0x15, 0x41, 0xab, 0xee, 0x5b, 0xac,
		0x8b, 0x0, 0x0, 0x0,
	]),
	decrypted: Buffer.from([0xbe, 0xde, 0x0, 0x1, 0x10, 0xff, 0x90, 0x0, 0xf8, 0xff, 0xfe]),
	opusFrame: Buffer.from([0xf8, 0xff, 0xfe]),
};

View File

@@ -0,0 +1,4 @@
// Public exports for the voice receive subsystem.
export * from './VoiceReceiver';
export * from './SSRCMap';
export * from './AudioReceiveStream';
export * from './SpeakingMap';

View File

@@ -0,0 +1,56 @@
/**
 * The encryption operations exposed by whichever supported crypto library is installed.
 */
interface Methods {
	// Opens (decrypts) a sealed box; returns null when authentication fails.
	open(buffer: Buffer, nonce: Buffer, secretKey: Uint8Array): Buffer | null;
	// Seals (encrypts) an Opus packet.
	close(opusPacket: Buffer, nonce: Buffer, secretKey: Uint8Array): Buffer;
	// Generates cryptographically-secure random bytes.
	random(bytes: number, nonce: Buffer): Buffer;
}
// Adapters that normalise each supported crypto library to the common Methods shape.
// Keys are npm package names; order here determines load preference below.
const libs = {
	sodium: (sodium: any): Methods => ({
		open: sodium.api.crypto_secretbox_open_easy,
		close: sodium.api.crypto_secretbox_easy,
		random: (n: any, buffer?: Buffer) => {
			// sodium fills a caller-supplied buffer in place, so allocate one if needed.
			// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
			if (!buffer) buffer = Buffer.allocUnsafe(n);
			sodium.api.randombytes_buf(buffer);
			return buffer;
		},
	}),
	'libsodium-wrappers': (sodium: any): Methods => ({
		open: sodium.crypto_secretbox_open_easy,
		close: sodium.crypto_secretbox_easy,
		random: (n: any) => sodium.randombytes_buf(n),
	}),
	tweetnacl: (tweetnacl: any): Methods => ({
		open: tweetnacl.secretbox.open,
		close: tweetnacl.secretbox,
		random: (n: any) => tweetnacl.randomBytes(n),
	}),
} as const;
// Placeholder used for every method until (and unless) a supported library loads.
const fallbackError = () => {
	throw new Error(
		`Cannot play audio as no valid encryption package is installed.
- Install sodium, libsodium-wrappers, or tweetnacl.
- Use the generateDependencyReport() function for more information.\n`,
	);
};
// Mutable method table — populated asynchronously by the loader below once a
// supported library is found.
const methods: Methods = {
	open: fallbackError,
	close: fallbackError,
	random: fallbackError,
};
// Try each supported library in order of preference, adopting the first that loads.
// Failures are intentionally swallowed — a missing optional dependency is expected.
void (async () => {
	for (const libName of Object.keys(libs) as (keyof typeof libs)[]) {
		try {
			// eslint-disable-next-line
			const lib = require(libName);
			// libsodium-wrappers initialises asynchronously — wait until it is usable.
			if (libName === 'libsodium-wrappers' && lib.ready) await lib.ready;
			Object.assign(methods, libs[libName](lib));
			break;
		} catch {}
	}
})();
export { methods };

View File

@@ -0,0 +1,16 @@
import { methods } from '../Secretbox';
jest.mock(
'tweetnacl',
() => ({
secretbox: {
// eslint-disable-next-line @typescript-eslint/no-empty-function
open() {},
},
}),
{ virtual: true },
);
test('Does not throw error with a package installed', () => {
// @ts-expect-error
expect(() => methods.open()).not.toThrowError();
});

View File

@@ -0,0 +1,24 @@
import { abortAfter } from '../abortAfter';
// Fake timers let us trigger the delayed abort deterministically, and the spy
// lets us observe that the pending timeout is cleaned up on manual abort.
jest.useFakeTimers();
const clearTimeoutSpy = jest.spyOn(global, 'clearTimeout');
describe('abortAfter', () => {
	test('Aborts after the given delay', () => {
		const [ac, signal] = abortAfter(100);
		expect(ac.signal).toBe(signal);
		expect(signal.aborted).toBe(false);
		jest.runAllTimers();
		expect(signal.aborted).toBe(true);
	});
	test('Cleans up when manually aborted', () => {
		const [ac, signal] = abortAfter(100);
		expect(ac.signal).toBe(signal);
		expect(signal.aborted).toBe(false);
		clearTimeoutSpy.mockClear();
		ac.abort();
		// The internal timeout must be cleared so the timer does not fire later.
		expect(clearTimeoutSpy).toHaveBeenCalledTimes(1);
	});
});

View File

@@ -0,0 +1,122 @@
import { demuxProbe } from '../demuxProbe';
import { opus as _opus } from 'prism-media';
import { Readable } from 'node:stream';
import { StreamType } from '../../audio';
import EventEmitter, { once } from 'node:events';
// Mock prism-media so the demuxers can be replaced with observable stubs below.
jest.mock('prism-media');
const WebmDemuxer = _opus.WebmDemuxer as unknown as jest.Mock<_opus.WebmDemuxer>;
const OggDemuxer = _opus.OggDemuxer as unknown as jest.Mock<_opus.OggDemuxer>;
/**
 * Async generator that yields n single-byte buffers (values 0..n-1), pausing
 * for one event-loop tick between each so data arrives asynchronously.
 *
 * @param n The number of buffers to yield
 */
async function* gen(n: number) {
	let index = 0;
	while (index < n) {
		yield Buffer.from([index]);
		await nextTick();
		index += 1;
	}
}
/**
 * Creates a buffer containing the byte values 0 to n-1.
 *
 * @param n The number of bytes
 */
function range(n: number) {
	return Buffer.from(Array.from({ length: n }, (_, index) => index));
}
// A 19-byte OpusHead: byte 9 is the channel count (0x02 = stereo) and bytes
// 12-15 are the little-endian sample rate (0x0000bb80 = 48000 Hz) — valid for Discord.
const validHead = Buffer.from([
	0x4f, 0x70, 0x75, 0x73, 0x48, 0x65, 0x61, 0x64, 0x01, 0x02, 0x38, 0x01, 0x80, 0xbb, 0, 0, 0, 0, 0,
]);
// Identical, but mono (channel count byte is 0x01) — should fail validation.
const invalidHead = Buffer.from([
	0x4f, 0x70, 0x75, 0x73, 0x48, 0x65, 0x61, 0x64, 0x01, 0x01, 0x38, 0x01, 0x80, 0xbb, 0, 0, 0, 0, 0,
]);
/**
 * Reads a stream to completion, concatenating everything it emits into one buffer.
 *
 * @param stream The stream to consume
 */
async function collectStream(stream: Readable): Promise<Buffer> {
	const chunks: Buffer[] = [];
	// Wait for the stream to become readable before iterating it.
	await once(stream, 'readable');
	for await (const chunk of stream) {
		chunks.push(chunk);
	}
	return Buffer.concat(chunks);
}
/**
 * Resolves once the current event-loop tick has completed.
 */
function nextTick() {
	return new Promise((resolve) => {
		process.nextTick(resolve);
	});
}
describe('demuxProbe', () => {
	const webmWrite: jest.Mock<(buffer: Buffer) => void> = jest.fn();
	const oggWrite: jest.Mock<(buffer: Buffer) => void> = jest.fn();
	beforeAll(() => {
		// Swap the demuxer prototypes for EventEmitter-backed stubs whose write()
		// calls are observable and whose 'head' events we can fire on demand.
		WebmDemuxer.prototype = {
			...WebmDemuxer,
			...EventEmitter.prototype,
			write: webmWrite,
		};
		OggDemuxer.prototype = {
			...OggDemuxer,
			...EventEmitter.prototype,
			write: oggWrite,
		};
	});
	beforeEach(() => {
		webmWrite.mockReset();
		oggWrite.mockReset();
	});
	test('Defaults to arbitrary', async () => {
		const stream = Readable.from(gen(10), { objectMode: false });
		const probe = await demuxProbe(stream);
		expect(probe.type).toBe(StreamType.Arbitrary);
		// All probed data must be replayed so nothing is lost.
		await expect(collectStream(probe.stream)).resolves.toEqual(range(10));
	});
	test('Detects WebM', async () => {
		const stream = Readable.from(gen(10), { objectMode: false });
		// Emit a valid head mid-stream to simulate the WebM demuxer matching.
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		webmWrite.mockImplementation(function mock(data: Buffer) {
			if (data[0] === 5) this.emit('head', validHead);
		} as any);
		const probe = await demuxProbe(stream);
		expect(probe.type).toBe(StreamType.WebmOpus);
		await expect(collectStream(probe.stream)).resolves.toEqual(range(10));
	});
	test('Detects Ogg', async () => {
		const stream = Readable.from(gen(10), { objectMode: false });
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		oggWrite.mockImplementation(function mock(data: Buffer) {
			if (data[0] === 5) this.emit('head', validHead);
		} as any);
		const probe = await demuxProbe(stream);
		expect(probe.type).toBe(StreamType.OggOpus);
		await expect(collectStream(probe.stream)).resolves.toEqual(range(10));
	});
	test('Rejects invalid OpusHead', async () => {
		const stream = Readable.from(gen(10), { objectMode: false });
		// A head that fails validation must not settle the probe as Ogg.
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		oggWrite.mockImplementation(function mock(data: Buffer) {
			if (data[0] === 5) this.emit('head', invalidHead);
		} as any);
		const probe = await demuxProbe(stream);
		expect(probe.type).toBe(StreamType.Arbitrary);
		await expect(collectStream(probe.stream)).resolves.toEqual(range(10));
	});
	test('Gives up on larger streams', async () => {
		// 8192 bytes exceeds the default 1024-byte probe budget.
		const stream = Readable.from(gen(8192), { objectMode: false });
		const probe = await demuxProbe(stream);
		expect(probe.type).toBe(StreamType.Arbitrary);
		await expect(collectStream(probe.stream)).resolves.toEqual(range(8192));
	});
	test('Propagates errors', async () => {
		const testError = new Error('test error');
		const stream = new Readable({
			read() {
				this.destroy(testError);
			},
		});
		await expect(demuxProbe(stream)).rejects.toBe(testError);
	});
});

View File

@@ -0,0 +1,54 @@
import EventEmitter from 'node:events';
import { VoiceConnection, VoiceConnectionStatus } from '../../VoiceConnection';
import { entersState } from '../entersState';
/**
 * Builds a minimal EventEmitter-backed stand-in for a VoiceConnection in the
 * given state — just enough surface for entersState to observe.
 *
 * @param status The initial status of the fake connection
 */
function createFakeVoiceConnection(status = VoiceConnectionStatus.Signalling) {
	const vc = new EventEmitter() as any;
	vc.state = { status };
	return vc as VoiceConnection;
}
// Default to fake timers; individual tests switch back to real timers when they
// need genuine asynchronous event delivery.
beforeEach(() => {
	jest.useFakeTimers();
});
describe('entersState', () => {
	test('Returns the target once the state has been entered before timeout', async () => {
		jest.useRealTimers();
		const vc = createFakeVoiceConnection();
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		process.nextTick(() => vc.emit(VoiceConnectionStatus.Ready, null as any, null as any));
		const result = await entersState(vc, VoiceConnectionStatus.Ready, 1000);
		expect(result).toBe(vc);
	});
	test('Rejects once the timeout is exceeded', async () => {
		const vc = createFakeVoiceConnection();
		const promise = entersState(vc, VoiceConnectionStatus.Ready, 1000);
		jest.runAllTimers();
		await expect(promise).rejects.toThrowError();
	});
	test('Returns the target once the state has been entered before signal is aborted', async () => {
		jest.useRealTimers();
		const vc = createFakeVoiceConnection();
		const ac = new AbortController();
		// eslint-disable-next-line @typescript-eslint/no-unsafe-argument
		process.nextTick(() => vc.emit(VoiceConnectionStatus.Ready, null as any, null as any));
		const result = await entersState(vc, VoiceConnectionStatus.Ready, ac.signal);
		expect(result).toBe(vc);
	});
	test('Rejects once the signal is aborted', async () => {
		const vc = createFakeVoiceConnection();
		const ac = new AbortController();
		const promise = entersState(vc, VoiceConnectionStatus.Ready, ac.signal);
		ac.abort();
		await expect(promise).rejects.toThrowError();
	});
	test('Resolves immediately when target already in desired state', async () => {
		const vc = createFakeVoiceConnection();
		await expect(entersState(vc, VoiceConnectionStatus.Signalling, 1000)).resolves.toBe(vc);
	});
});

View File

@@ -0,0 +1,12 @@
/**
 * Creates an abort controller that aborts after the given time.
 *
 * @param delay - The time in milliseconds to wait before aborting
 *
 * @returns The created AbortController and its signal, as a tuple
 */
export function abortAfter(delay: number): [AbortController, AbortSignal] {
	const ac = new AbortController();
	const timeout = setTimeout(() => ac.abort(), delay);
	// Clear the pending timeout once the signal aborts (whether via the timer or
	// manually). `once: true` removes the listener after it fires so it cannot leak.
	// @ts-ignore
	ac.signal.addEventListener('abort', () => clearTimeout(timeout), { once: true });
	return [ac, ac.signal];
}

View File

@@ -0,0 +1,53 @@
import type { GatewayVoiceServerUpdateDispatchData, GatewayVoiceStateUpdateDispatchData } from 'discord-api-types/v9';
/**
 * Methods that are provided by the @discordjs/voice library to implementations of
 * Discord gateway DiscordGatewayAdapters.
 */
export interface DiscordGatewayAdapterLibraryMethods {
	/**
	 * Call this when you receive a VOICE_SERVER_UPDATE payload that is relevant to the adapter.
	 *
	 * @param data - The inner data of the VOICE_SERVER_UPDATE payload
	 */
	onVoiceServerUpdate(data: GatewayVoiceServerUpdateDispatchData): void;
	/**
	 * Call this when you receive a VOICE_STATE_UPDATE payload that is relevant to the adapter.
	 *
	 * @param data - The inner data of the VOICE_STATE_UPDATE payload
	 */
	onVoiceStateUpdate(data: GatewayVoiceStateUpdateDispatchData): void;
	/**
	 * Call this when the adapter can no longer be used (e.g. due to a disconnect from the main gateway)
	 */
	destroy(): void;
}
/**
 * Methods that are provided by the implementer of a Discord gateway DiscordGatewayAdapter.
 */
export interface DiscordGatewayAdapterImplementerMethods {
	/**
	 * Implement this method such that the given payload is sent to the main Discord gateway connection.
	 *
	 * @param payload - The payload to send to the main Discord gateway connection
	 *
	 * @returns `false` if the payload definitely failed to send - in this case, the voice connection disconnects
	 */
	// NOTE(review): `any` disables type checking for gateway payloads — presumably kept
	// loose to avoid coupling to a specific gateway library; confirm before tightening.
	sendPayload(payload: any): boolean;
	/**
	 * This will be called by @discordjs/voice when the adapter can safely be destroyed as it will no
	 * longer be used.
	 */
	destroy(): void;
}
/**
 * A function used to build adapters. It accepts a methods parameter that contains functions that
 * can be called by the implementer when new data is received on its gateway connection. In return,
 * the implementer will return some methods that the library can call - e.g. to send messages on
 * the gateway, or to signal that the adapter can be removed.
 */
export type DiscordGatewayAdapterCreator = (
	methods: DiscordGatewayAdapterLibraryMethods,
) => DiscordGatewayAdapterImplementerMethods;

View File

@@ -0,0 +1,118 @@
import { Readable } from 'node:stream';
import prism from 'prism-media';
import { noop } from './util';
import { StreamType } from '..';
/**
 * Takes an Opus Head, and verifies whether the associated Opus audio is suitable to play in a Discord voice channel.
 *
 * @param opusHead The Opus Head to validate
 *
 * @returns `true` if suitable to play in a Discord voice channel, otherwise `false`
 */
export function validateDiscordOpusHead(opusHead: Buffer): boolean {
	// Discord voice requires stereo audio (2 channels) sampled at 48kHz.
	const channelCount = opusHead.readUInt8(9);
	const sampleRate = opusHead.readUInt32LE(12);
	return channelCount === 2 && sampleRate === 48000;
}
/**
 * The resulting information after probing an audio stream
 */
export interface ProbeInfo {
	/**
	 * The readable audio stream to use. You should use this rather than the input stream, as the probing
	 * function can sometimes read the input stream to its end and cause the stream to close.
	 */
	stream: Readable;
	/**
	 * The recommended stream type for this audio stream.
	 */
	type: StreamType;
}
/**
 * Attempt to probe a readable stream to figure out whether it can be demuxed using an Ogg or WebM Opus demuxer.
 *
 * @param stream The readable stream to probe
 * @param probeSize The number of bytes to attempt to read before giving up on the probe
 * @param validator The Opus Head validator function
 *
 * @experimental
 */
export function demuxProbe(
	stream: Readable,
	probeSize = 1024,
	validator = validateDiscordOpusHead,
): Promise<ProbeInfo> {
	return new Promise((resolve, reject) => {
		// Preconditions
		if (stream.readableObjectMode) reject(new Error('Cannot probe a readable stream in object mode'));
		if (stream.readableEnded) reject(new Error('Cannot probe a stream that has ended'));
		// Accumulates every byte consumed during the probe so it can be replayed.
		let readBuffer = Buffer.alloc(0);
		// The stream type settled on, if any — doubles as a "probe finished" flag.
		let resolved: StreamType | undefined = undefined;
		// Detach all listeners, record the result, and hand back a stream that still
		// contains every byte the probe consumed.
		const finish = (type: StreamType) => {
			stream.off('data', onData);
			stream.off('close', onClose);
			stream.off('end', onClose);
			stream.pause();
			resolved = type;
			if (stream.readableEnded) {
				// Source exhausted — replay the captured bytes from a fresh stream.
				resolve({
					stream: Readable.from(readBuffer),
					type,
				});
			} else {
				// NOTE(review): this pushes the probed bytes back onto the source stream's
				// internal buffer so consumers do not lose them — confirm that ordering
				// relative to not-yet-emitted data is acceptable for the inputs used here.
				if (readBuffer.length > 0) {
					stream.push(readBuffer);
				}
				resolve({
					stream,
					type,
				});
			}
		};
		// Builds a 'head' listener that settles on the given type if the head validates.
		const foundHead = (type: StreamType) => (head: Buffer) => {
			if (validator(head)) {
				finish(type);
			}
		};
		// Demuxer errors simply mean "not this format" — swallow them.
		const webm = new prism.opus.WebmDemuxer();
		webm.once('error', noop);
		webm.on('head', foundHead(StreamType.WebmOpus));
		const ogg = new prism.opus.OggDemuxer();
		ogg.once('error', noop);
		ogg.on('head', foundHead(StreamType.OggOpus));
		// If the source ends/closes before either demuxer matched, fall back to Arbitrary.
		const onClose = () => {
			if (!resolved) {
				finish(StreamType.Arbitrary);
			}
		};
		// Feed each chunk to both demuxers until one matches or the probe budget is spent.
		const onData = (buffer: Buffer) => {
			readBuffer = Buffer.concat([readBuffer, buffer]);
			webm.write(buffer);
			ogg.write(buffer);
			if (readBuffer.length >= probeSize) {
				stream.off('data', onData);
				stream.pause();
				// Defer so any in-flight 'head' event can win before we give up.
				process.nextTick(onClose);
			}
		};
		stream.once('error', reject);
		stream.on('data', onData);
		stream.once('close', onClose);
		stream.once('end', onClose);
	});
}

View File

@@ -0,0 +1,54 @@
import type { VoiceConnection, VoiceConnectionStatus } from '../VoiceConnection';
import type { AudioPlayer, AudioPlayerStatus } from '../audio/AudioPlayer';
import { abortAfter } from './abortAfter';
import EventEmitter, { once } from 'node:events';
/**
 * Allows a voice connection a specified amount of time to enter a given state, otherwise rejects with an error.
 *
 * @param target - The voice connection that we want to observe the state change for
 * @param status - The status that the voice connection should be in
 * @param timeoutOrSignal - The maximum time we are allowing for this to occur, or a signal that will abort the operation
 */
export function entersState(
	target: VoiceConnection,
	status: VoiceConnectionStatus,
	timeoutOrSignal: number | AbortSignal,
): Promise<VoiceConnection>;
/**
 * Allows an audio player a specified amount of time to enter a given state, otherwise rejects with an error.
 *
 * @param target - The audio player that we want to observe the state change for
 * @param status - The status that the audio player should be in
 * @param timeoutOrSignal - The maximum time we are allowing for this to occur, or a signal that will abort the operation
 */
export function entersState(
	target: AudioPlayer,
	status: AudioPlayerStatus,
	timeoutOrSignal: number | AbortSignal,
): Promise<AudioPlayer>;
/**
 * Allows a target a specified amount of time to enter a given state, otherwise rejects with an error.
 *
 * @param target - The object that we want to observe the state change for
 * @param status - The status that the target should be in
 * @param timeoutOrSignal - The maximum time we are allowing for this to occur, or a signal that will abort the operation
 */
export async function entersState<T extends VoiceConnection | AudioPlayer>(
	target: T,
	status: VoiceConnectionStatus | AudioPlayerStatus,
	timeoutOrSignal: number | AbortSignal,
) {
	// Nothing to wait for if the target is already in the desired state.
	if (target.state.status === status) return target;
	// When given a timeout, create our own controller so the timer can be cleaned up;
	// when given a signal, the caller owns its lifecycle.
	let ac: AbortController | undefined;
	let signal: AbortSignal;
	if (typeof timeoutOrSignal === 'number') {
		[ac, signal] = abortAfter(timeoutOrSignal);
	} else {
		signal = timeoutOrSignal;
	}
	try {
		// once() rejects with an AbortError if the signal fires before the event.
		await once(target as EventEmitter, status, { signal });
	} finally {
		// Abort our internal controller (if any) so its pending timeout is cleared.
		ac?.abort();
	}
	return target;
}

View File

@@ -0,0 +1,83 @@
/* eslint-disable @typescript-eslint/no-var-requires */
/* eslint-disable @typescript-eslint/no-require-imports */
import { resolve, dirname } from 'node:path';
import prism from 'prism-media';
/**
 * Generates a report of the dependencies used by the \@discordjs/voice module.
 * Useful for debugging.
 */
export function generateDependencyReport() {
	const report: string[] = [];
	// Append a "- name: version" line for each named package.
	const addVersions = (...names: string[]) => {
		for (const name of names) report.push(`- ${name}: ${version(name)}`);
	};
	// general
	report.push('Core Dependencies');
	addVersions('@discordjs/voice', 'prism-media');
	// opus
	report.push('', 'Opus Libraries');
	addVersions('@discordjs/opus', 'opusscript');
	// encryption
	report.push('', 'Encryption Libraries');
	addVersions('sodium', 'libsodium-wrappers', 'tweetnacl');
	// ffmpeg
	report.push('', 'FFmpeg');
	try {
		const info = prism.FFmpeg.getInfo();
		report.push(`- version: ${info.version}`);
		report.push(`- libopus: ${info.output.includes('--enable-libopus') ? 'yes' : 'no'}`);
	} catch (err) {
		report.push('- not found');
	}
	const divider = '-'.repeat(50);
	return [divider, ...report, divider].join('\n');
}
/**
 * Tries to find the package.json file for a given module by walking up the
 * directory tree from the given location.
 *
 * @param dir - The directory to look in
 * @param packageName - The name of the package to look for
 * @param depth - The maximum number of directories to climb
 */
function findPackageJSON(
	dir: string,
	packageName: string,
	depth: number,
): { name: string; version: string } | undefined {
	let currentDir = dir;
	for (let remaining = depth; remaining > 0; remaining--) {
		try {
			// eslint-disable-next-line
			const pkg = require(resolve(currentDir, './package.json'));
			// Only accept the manifest when it belongs to the package we want.
			if (pkg.name === packageName) return pkg;
		} catch (err) {
			// Missing or unreadable manifest — keep climbing.
		}
		currentDir = resolve(currentDir, '..');
	}
	return undefined;
}
/**
 * Tries to find the version of a dependency.
 *
 * @param name - The package to find the version of
 *
 * @returns The version string, or 'not found' if it could not be determined
 */
function version(name: string): string {
	try {
		// Our own manifest lives at a fixed relative path; anything else is resolved
		// from its entry point and then located by walking the directory tree.
		if (name === '@discordjs/voice') {
			// eslint-disable-next-line
			return require('../../package.json').version ?? 'not found';
		}
		const pkg = findPackageJSON(dirname(require.resolve(name)), name, 3);
		return pkg?.version ?? 'not found';
	} catch (err) {
		return 'not found';
	}
}

View File

@@ -0,0 +1,4 @@
// Public utility exports for the library.
export * from './generateDependencyReport';
export * from './entersState';
export * from './adapter';
export * from './demuxProbe';

View File

@@ -0,0 +1,4 @@
// A reusable function that does nothing — handy as a default callback or to swallow events.
// eslint-disable-next-line @typescript-eslint/no-empty-function
export const noop = () => {};
// NOTE(review): TypeScript 4.5 ships a built-in `Awaited<T>` utility type with different
// (recursive unwrap) semantics; this local alias shadows it for importers of this module —
// confirm the shallow `T | Promise<T>` behavior here is intentional.
export type Awaited<T> = T | Promise<T>;

View File

@@ -0,0 +1,20 @@
{
"extends": "./tsconfig.json",
"compilerOptions": {
"allowJs": true
},
"include": [
"**/*.ts",
"**/*.tsx",
"**/*.js",
"**/*.mjs",
"**/*.jsx",
"**/*.test.ts",
"**/*.test.js",
"**/*.test.mjs",
"**/*.spec.ts",
"**/*.spec.js",
"**/*.spec.mjs"
],
"exclude": []
}

View File

@@ -0,0 +1,48 @@
{
// Mapped from https://www.typescriptlang.org/tsconfig
"compilerOptions": {
// Type Checking
"allowUnreachableCode": false,
"allowUnusedLabels": false,
// if true: conflicts with discord-api-types
"exactOptionalPropertyTypes": false,
"noFallthroughCasesInSwitch": true,
"noImplicitOverride": true,
"noImplicitReturns": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"strict": true,
"useUnknownInCatchVariables": true,
// Modules
"module": "CommonJS",
"moduleResolution": "node",
"resolveJsonModule": true,
// Emit
"declaration": true,
"importHelpers": true,
"importsNotUsedAsValues": "error",
"inlineSources": true,
"newLine": "lf",
"noEmitHelpers": true,
"outDir": "dist",
"preserveConstEnums": true,
"removeComments": false,
"sourceMap": true,
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
// Language and Environment
"emitDecoratorMetadata": true,
"experimentalDecorators": true,
"lib": ["ESNext"],
"target": "ES2020",
"useDefineForClassFields": true,
// Completeness
"skipLibCheck": true
},
"include": ["src/**/*.ts"],
"exclude": ["src/**/__tests__"]
}

View File

@@ -0,0 +1,12 @@
import { defineConfig } from 'tsup';
// Build configuration: bundle src/index.ts into minified ESM and CJS outputs
// with type declarations and source maps.
export default defineConfig({
	clean: true, // wipe the output directory before each build
	dts: true, // emit .d.ts type declarations
	entryPoints: ['src/index.ts'],
	format: ['esm', 'cjs'],
	minify: true,
	skipNodeModulesBundle: true, // leave dependencies external
	sourcemap: true,
	target: 'es2021',
});