fix: events

This commit is contained in:
Yuzu 2022-06-21 19:59:41 -05:00
parent b1f8b0a5b5
commit 67970faf7b
59 changed files with 5248 additions and 5228 deletions


@ -1,2 +1,3 @@
# biscuit
A brand new bleeding edge non bloated Discord library

deno.json

@ -0,0 +1,8 @@
{
"fmt": {
"options": {
"indentWidth": 4,
"lineWidth": 120
}
}
}

mod.ts

@ -2,4 +2,4 @@ export * from "./session/mod.ts";
export * from "./util/mod.ts"; export * from "./util/mod.ts";
export * from "./structures/mod.ts"; export * from "./structures/mod.ts";
export * from "./vendor/external.ts"; export * from "./vendor/external.ts";
export * from "./handlers/mod.ts"; export * from "./handlers/mod.ts";


@ -1,6 +1,9 @@
import type { DiscordGatewayPayload, DiscordMessage, DiscordReady, Shard } from "../vendor/external.ts";

export type DiscordRawEventHandler = (shard: Shard, data: DiscordGatewayPayload) => unknown;

export interface Events {
    ready(payload: DiscordReady, shardId: number): unknown;
    messageCreate(message: DiscordMessage): unknown;
    raw(data: DiscordGatewayPayload, shardId: number): unknown;
}


@ -1,129 +1,128 @@
import type {
    DiscordGatewayPayload,
    DiscordGetGatewayBot,
    DiscordMessage,
    DiscordReady,
    GatewayBot,
    GatewayDispatchEventNames,
    GatewayIntents,
    Shard,
} from "../vendor/external.ts";
import { EventEmitter, Routes, Snowflake } from "../util/mod.ts";
import type { DiscordRawEventHandler, Events } from "./Events.ts";
import { createGatewayManager, createRestManager } from "../vendor/external.ts";

export interface RestOptions {
    secretKey?: string;
    applicationId?: Snowflake;
}

export interface GatewayOptions {
    botId?: Snowflake;
    data?: GatewayBot;
}

export interface SessionOptions {
    token: string;
    rawHandler?: DiscordRawEventHandler;
    intents?: GatewayIntents;
    rest?: RestOptions;
    gateway?: GatewayOptions;
}

/**
 * Receives a Token, connects
 */
export class Session extends EventEmitter {
    options: SessionOptions;

    // TODO: improve this with CreateShardManager etc
    rest?: ReturnType<typeof createRestManager>;
    gateway?: ReturnType<typeof createGatewayManager>;

    constructor(options: SessionOptions) {
        super();
        this.options = options;

        // TODO: set botId in Session.botId or something
    }

    /** TODO: move this */
    static #toSnakeCase(str: string) {
        // probably not a fast implementation
        return str.replace(/[A-Z]/g, (char) => "_" + char.toLowerCase());
    }

    override on(event: "ready", func: Events["ready"]): this;
    override on(event: "messageCreate", func: Events["messageCreate"]): this;
    override on(event: "raw", func: Events["raw"]): this;
    override on(event: keyof Events, func: Events[keyof Events]): this {
        return super.on(event, func);
    }

    override off(event: "ready", func: Events["ready"]): this;
    override off(event: "messageCreate", func: Events["messageCreate"]): this;
    override off(event: "raw", func: Events["raw"]): this;
    override off(event: keyof Events, func: Events[keyof Events]): this {
        return super.off(event, func);
    }

    override once(event: "ready", func: Events["ready"]): this;
    override once(event: "messageCreate", func: Events["messageCreate"]): this;
    override once(event: "raw", func: Events["raw"]): this;
    override once(event: keyof Events, func: Events[keyof Events]): this {
        return super.once(event, func);
    }

    async start() {
        const defHandler: DiscordRawEventHandler = (shard, data) => {
            this.emit("raw", data, shard.id);

            if (!data.t) return;

            this.emit(data.t as GatewayDispatchEventNames, data, shard.id);
        };

        this.rest = createRestManager({
            token: this.options.token,
            debug: (text) => {
                // TODO: set this using the event emitter
                super.rawListeners("debug")?.forEach((fn) => fn(text));
            },
            secretKey: this.options.rest?.secretKey ?? undefined,
        });

        this.gateway = createGatewayManager({
            gatewayBot: this.options.gateway?.data ?? {} as GatewayBot, // TODO
            gatewayConfig: {
                token: this.options.token,
                intents: this.options.intents,
            },
            handleDiscordPayload: this.options.rawHandler ?? defHandler,
        });

        const getGatewayBot = () => this.rest!.runMethod<DiscordGetGatewayBot>(this.rest!, "GET", Routes.GATEWAY_BOT());

        // check if is empty
        if (!Object.keys(this.options.gateway?.data ?? {}).length) {
            const nonParsed = await getGatewayBot();

            this.gateway.gatewayBot = {
                url: nonParsed.url,
                shards: nonParsed.shards,
                sessionStartLimit: {
                    total: nonParsed.session_start_limit.total,
                    remaining: nonParsed.session_start_limit.remaining,
                    resetAfter: nonParsed.session_start_limit.reset_after,
                    maxConcurrency: nonParsed.session_start_limit.max_concurrency,
                },
            };

            this.gateway.lastShardId = this.gateway.gatewayBot.shards - 1;
            this.gateway.manager.totalShards = this.gateway.gatewayBot.shards;
        }

        this.gateway.spawnShards();
    }
}
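The private `#toSnakeCase` helper above is marked `TODO: move this`; it looks intended to bridge the camelCase listener names in `Events` (`messageCreate`) and the upper-snake-case dispatch names Discord delivers in `data.t` (`MESSAGE_CREATE`). A minimal sketch of that conversion, using only the regex shown above; `toDispatchName` is our own name, not part of the commit:

```ts
// Sketch: camelCase event name -> gateway dispatch name (assumption: this is
// the mapping #toSnakeCase is eventually meant to feed).
function toDispatchName(event: string): string {
    const snake = event.replace(/[A-Z]/g, (char) => "_" + char.toLowerCase());
    return snake.toUpperCase();
}

toDispatchName("messageCreate"); // "MESSAGE_CREATE"
toDispatchName("ready"); // "READY"
```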


@ -1,2 +1,2 @@
export * from "./Session.ts"; export * from "./Session.ts";
export * from "./Events.ts"; export * from "./Events.ts";


@ -1 +1 @@
export * from "../mod.ts"; export * from "../mod.ts";


@ -1,11 +1,19 @@
import * as Discord from "./deps.ts"; import * as Discord from "./deps.ts";
if (!Deno.args[0]) {
throw new Error("Please provide a token");
}
const session = new Discord.Session({ const session = new Discord.Session({
token: Deno.args[0], token: Deno.args[0],
intents: Discord.GatewayIntents.MessageContent | Discord.GatewayIntents.Guilds |
Discord.GatewayIntents.GuildMessages,
}); });
session.on("ready", (payload) => console.log(payload)); session.on("ready", (payload) => console.log(payload));
session.on("raw", (shard, data) => console.log(shard, data)); session.on("message", (payload) => console.log(payload));
session.on("debug", (text) => console.log(text)); // session.on("raw", (data, shardId) => console.log(shardId, data));
session.start(); console.log("hello");
session.start();


@ -1,77 +1,74 @@
// deno-lint-ignore-file ban-types

/**
 * An event emitter (observer pattern)
 */
export class EventEmitter {
    listeners = new Map<PropertyKey, Function[]>();

    #addListener(event: string, func: Function) {
        this.listeners.set(event, this.listeners.get(event) || []);
        this.listeners.get(event)?.push(func);
        return this;
    }

    on(event: string, func: Function) {
        return this.#addListener(event, func);
    }

    #removeListener(event: string, func: Function) {
        if (this.listeners.has(event)) {
            const listener = this.listeners.get(event);
            if (listener?.includes(func)) {
                listener.splice(listener.indexOf(func), 1);
                if (listener.length === 0) {
                    this.listeners.delete(event);
                }
            }
        }

        return this;
    }

    off(event: string, func: Function) {
        return this.#removeListener(event, func);
    }

    once(event: string, func: Function) {
        // it is important for this to be an arrow function
        const closure = () => {
            func();
            this.off(event, func);
        };

        const listener = this.listeners.get(event) ?? [];
        listener.push(closure);
        return this;
    }

    emit(event: string, ...args: unknown[]) {
        const listener = this.listeners.get(event);
        if (!listener) {
            return false;
        }

        listener.forEach((f) => f(...args));
        return true;
    }

    listenerCount(eventName: string) {
        return this.listeners.get(eventName)?.length ?? 0;
    }

    rawListeners(eventName: string): Function[] | undefined {
        return this.listeners.get(eventName);
    }
}

export default EventEmitter;
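A quick, Discord-independent usage sketch of the emitter above (the event name and handler are ours, purely illustrative):

```ts
// Sketch: registering, emitting, and removing a listener.
const emitter = new EventEmitter();
const onPing = (value: unknown) => console.log("ping:", value);

emitter.on("ping", onPing);
emitter.emit("ping", 42); // logs "ping: 42" and returns true
console.log(emitter.listenerCount("ping")); // 1

emitter.off("ping", onPing);
emitter.emit("ping", 42); // no listeners left, returns false
```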


@ -1,3 +1,3 @@
export function GATEWAY_BOT() {
    return "/gateway/bot";
}


@ -5,7 +5,7 @@ export const DiscordEpoch = 14200704e5;
// utilities for Snowflakes
export const Snowflake = {
    snowflakeToTimestamp(id: Snowflake) {
        return (Number(id) >> 22) + DiscordEpoch;
    },
};
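One caveat worth noting: JavaScript's `>>` operates on 32-bit integers while snowflakes are 64-bit, so `Number(id) >> 22` overflows for real ids. A BigInt-based sketch of the same extraction (ours, not part of the commit):

```ts
// Sketch: snowflake -> Unix timestamp in milliseconds, using BigInt to avoid
// the 32-bit limit of the >> operator on numbers. Mirrors the module's epoch.
const DiscordEpoch = 14200704e5;

function snowflakeToTimestampBigInt(id: string | bigint): number {
    return Number(BigInt(id) >> 22n) + DiscordEpoch;
}

snowflakeToTimestampBigInt("175928847299117063"); // 1462015105796 (2016-04-30)
```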


@ -1,3 +1,3 @@
export * from "./EventEmmiter.ts"; export * from "./EventEmmiter.ts";
export * from "./Snowflake.ts"; export * from "./Snowflake.ts";
export * as Routes from "./Routes.ts"; export * as Routes from "./Routes.ts";


@ -52,7 +52,7 @@ This WS service is meant for ADVANCED DEVELOPERS ONLY!
```ts
createGatewayManager({
    // TODO: (docs) Fill this out
});
```


@ -1,7 +1,7 @@
import { GatewayManager } from "./manager/gatewayManager.ts"; import { GatewayManager } from "./manager/gatewayManager.ts";
export function calculateShardId(gateway: GatewayManager, guildId: bigint) { export function calculateShardId(gateway: GatewayManager, guildId: bigint) {
if (gateway.manager.totalShards === 1) return 0; if (gateway.manager.totalShards === 1) return 0;
return Number((guildId >> 22n) % BigInt(gateway.manager.totalShards - 1)); return Number((guildId >> 22n) % BigInt(gateway.manager.totalShards - 1));
} }


@ -2,15 +2,15 @@ import { GatewayManager } from "./gatewayManager.ts";
/** Handler used to determine max number of shards to use based upon the max concurrency. */
export function calculateTotalShards(gateway: GatewayManager): number {
    // Bots under 100k servers do not have access to total shards.
    if (gateway.manager.totalShards < 100) return gateway.manager.totalShards;

    // Calculate a multiple of `maxConcurrency` which can be used to connect to the gateway.
    return Math.ceil(
        gateway.manager.totalShards /
            // If `maxConcurrency` is 1 we can safely use 16.
            (gateway.gatewayBot.sessionStartLimit.maxConcurrency === 1
                ? 16
                : gateway.gatewayBot.sessionStartLimit.maxConcurrency),
    ) * gateway.gatewayBot.sessionStartLimit.maxConcurrency;
}


@ -1,13 +1,13 @@
import { GatewayManager } from "./gatewayManager.ts"; import { GatewayManager } from "./gatewayManager.ts";
export function calculateWorkerId(manager: GatewayManager, shardId: number) { export function calculateWorkerId(manager: GatewayManager, shardId: number) {
// Ignore decimal numbers. // Ignore decimal numbers.
let workerId = Math.floor((shardId) / manager.shardsPerWorker); let workerId = Math.floor((shardId) / manager.shardsPerWorker);
// If the workerId overflows the maximal allowed workers we by default just use to last worker. // If the workerId overflows the maximal allowed workers we by default just use to last worker.
if (workerId >= manager.totalWorkers) { if (workerId >= manager.totalWorkers) {
// The Id of the last available worker is total -1 // The Id of the last available worker is total -1
workerId = manager.totalWorkers - 1; workerId = manager.totalWorkers - 1;
} }
return workerId; return workerId;
} }
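A worked example with the defaults shown elsewhere in this commit (25 shards per worker, 4 workers): shard 30 belongs to worker 1, and shard 120 overflows onto the last worker.

```ts
// Sketch: worker assignment for shardsPerWorker = 25, totalWorkers = 4.
const shardsPerWorker = 25;
const totalWorkers = 4;

function workerFor(shardId: number): number {
    const workerId = Math.floor(shardId / shardsPerWorker);
    return workerId >= totalWorkers ? totalWorkers - 1 : workerId;
}

workerFor(30); // 1
workerFor(120); // 3 (clamped to the last worker)
```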


@ -27,268 +27,268 @@ export type GatewayManager = ReturnType<typeof createGatewayManager>;
 * bots.
 */
export function createGatewayManager(
    options: PickPartial<CreateGatewayManager, "handleDiscordPayload" | "gatewayBot" | "gatewayConfig">,
) {
    const prepareBucketsOverwritten = options.prepareBuckets ?? prepareBuckets;
    const spawnShardsOverwritten = options.spawnShards ?? spawnShards;
    const stopOverwritten = options.stop ?? stop;
    const tellWorkerToIdentifyOverwritten = options.tellWorkerToIdentify ?? tellWorkerToIdentify;
    const calculateTotalShardsOverwritten = options.calculateTotalShards ?? calculateTotalShards;
    const calculateWorkerIdOverwritten = options.calculateWorkerId ?? calculateWorkerId;

    const totalShards = options.totalShards ?? options.gatewayBot.shards ?? 1;

    const gatewayManager = {
        // ----------
        // PROPERTIES
        // ----------

        /** The max concurrency buckets.
         * Those will be created when the `spawnShards` (which calls `prepareBuckets` under the hood) function gets called.
         */
        buckets: new Map<
            number,
            {
                workers: { id: number; queue: number[] }[];
                leak: LeakyBucket;
            }
        >(),
/** Id of the first Shard which should get controlled by this manager.
*
* NOTE: This is intended for testing purposes
* if big bots want to test the gateway on smaller scale.
* This is not recommended to be used in production.
*/
firstShardId: options.firstShardId ?? 0,
/** Important data which is used by the manager to connect shards to the gateway. */
gatewayBot: options.gatewayBot,
/** Id of the last Shard which should get controlled by this manager.
*
* NOTE: This is intended for testing purposes
* if big bots want to test the gateway on smaller scale.
* This is not recommended to be used in production.
*/
lastShardId: options.lastShardId ?? totalShards - 1 ?? 1,
/** This is where the Shards get stored.
* This will not be used when having a custom workers solution.
*/
manager: {} as ShardManager,
/** Delay in milliseconds to wait before spawning next shard.
* OPTIMAL IS ABOVE 5100. YOU DON'T WANT TO HIT THE RATE LIMIT!!!
*/
spawnShardDelay: options.spawnShardDelay ?? 5300,
/** How many Shards should get assigned to a Worker.
*
* IMPORTANT: Discordeno will NOT spawn Workers for you.
* Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
* Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
*
* NOTE: The last Worker will IGNORE this value,
* which means that the last worker can get assigned an unlimited amount of shards.
* This is not a bug but intended behavior and means you have to assign more workers to this manager.
*/
shardsPerWorker: options.shardsPerWorker ?? 25,
/** The total amount of Workers which get controlled by this manager.
*
* IMPORTANT: Discordeno will NOT spawn Workers for you.
* Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
* Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
*/
totalWorkers: options.totalWorkers ?? 4,
// ----------
// PROPERTIES
// ----------
/** Prepares the buckets for identifying.
*
* NOTE: Most of the time this function does not need to be called,
* since it gets called by the `spawnShards` function indirectly.
*/
prepareBuckets: function () {
return prepareBucketsOverwritten(this);
},
/** This function starts to spawn the Shards assigned to this manager.
*
* The managers `buckets` will be created and
*
* if `resharding.useOptimalLargeBotSharding` is set to true,
* `totalShards` gets double checked and adjusted accordingly if wrong.
*/
spawnShards: function () {
return spawnShardsOverwritten(this);
},
/** Stop the gateway. This closes all shards. */
stop: function (code: number, reason: string) {
return stopOverwritten(this, code, reason);
},
/** Tell the Worker with this Id to identify this Shard.
*
* Useful if a custom Worker solution should be used.
*
* IMPORTANT: Discordeno will NOT spawn Workers for you.
* Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
* Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
*/
tellWorkerToIdentify: function (workerId: number, shardId: number, bucketId: number) {
return tellWorkerToIdentifyOverwritten(this, workerId, shardId, bucketId);
},
// TODO: fix debug
/** Handle the different logs. Used for debugging. */
debug: options.debug || function () {},
// /** The methods related to resharding. */
// resharding: {
// /** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
// useOptimalLargeBotSharding: options.resharding?.useOptimalLargeBotSharding ?? true,
// /** Whether or not to automatically reshard.
// *
// * @default true
// */
// reshard: options.resharding?.reshard ?? true,
// /** The percentage at which resharding should occur.
// *
// * @default 80
// */
// reshardPercentage: options.resharding?.reshardPercentage ?? 80,
// /** Handles resharding the bot when necessary. */
// resharder: options.resharding?.resharder ?? resharder,
// /** Handles checking if all new shards are online in the new gateway. */
// isPending: options.resharding?.isPending ?? resharderIsPending,
// /** Handles closing all shards in the old gateway. */
// closeOldShards: options.resharding?.closeOldShards ?? resharderCloseOldShards,
// /** Handles checking if it is time to reshard and triggers the resharder. */
// check: options.resharding?.check ?? startReshardingChecks,
// /** Handler to mark a guild id with its new shard id in cache. */
// markNewGuildShardId: options.resharding?.markNewGuildShardId ?? markNewGuildShardId,
// /** Handler to update all guilds in cache with the new shard id. */
// editGuildShardIds: options.resharding?.editGuildShardIds ?? reshardingEditGuildShardIds,
// },
/** Calculate the amount of Shards which should be used based on the bot's max concurrency. */
calculateTotalShards: function () {
return calculateTotalShardsOverwritten(this);
},
/** Calculate the Id of the Worker related to this Shard. */
calculateWorkerId: function (shardId: number) {
return calculateWorkerIdOverwritten(this, shardId);
},
};
gatewayManager.manager = createShardManager({
createShardOptions: options.createShardOptions,
gatewayConfig: options.gatewayConfig,
shardIds: [],
totalShards,
handleMessage: function (shard, message) {
return options.handleDiscordPayload(shard, message);
},
requestIdentify: async (shardId) => {
// TODO: improve
await gatewayManager.buckets.get(shardId % gatewayManager.gatewayBot.sessionStartLimit.maxConcurrency)!.leak
.acquire(1);
},
});
return gatewayManager;
}
export interface CreateGatewayManager {
/** Delay in milliseconds to wait before spawning next shard. OPTIMAL IS ABOVE 5100. YOU DON'T WANT TO HIT THE RATE LIMIT!!! */
spawnShardDelay: number;
/** Total amount of shards your bot uses. Useful for zero-downtime updates or resharding. */
totalShards: number;
/** The amount of shards to load per worker. */
shardsPerWorker: number;
/** The total amount of workers to use for your bot. */
totalWorkers: number;
/** Id of the first Shard which should get controlled by this manager.
*
* NOTE: This is intended for testing purposes
* if big bots want to test the gateway on smaller scale.
* This is not recommended to be used in production.
*/
firstShardId: number;
/** Id of the last Shard which should get controlled by this manager.
*
* NOTE: This is intended for testing purposes
* if big bots want to test the gateway on smaller scale.
* This is not recommended to be used in production.
*/
lastShardId: number;
/** Important data which is used by the manager to connect shards to the gateway. */
gatewayBot: GatewayBot;
gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
/** Options which are used to create a new shard. */
createShardOptions?: Omit<CreateShard, "id" | "totalShards" | "requestIdentify" | "gatewayConfig">;
/** Stored as bucketId: { workers: [workerId, [ShardIds]], createNextShard: boolean } */
buckets: Map<
number,
{
workers: { id: number; queue: number[] }[];
leak: LeakyBucket;
}
>;
// METHODS
/** Prepares the buckets for identifying */
prepareBuckets: typeof prepareBuckets;
/** The handler for spawning ALL the shards. */
spawnShards: typeof spawnShards;
/** The handler to close all shards. */
stop: typeof stop;
/** Sends the discord payload to another server. */
handleDiscordPayload: (shard: Shard, data: DiscordGatewayPayload) => any;
/** Tell the worker to begin identifying this shard */
tellWorkerToIdentify: typeof tellWorkerToIdentify;
/** Handle the different logs. Used for debugging. */
debug: (text: GatewayDebugEvents, ...args: any[]) => unknown;
/** The methods related to resharding. */
// resharding: {
// /** Whether the resharder should automatically switch to LARGE BOT SHARDING when you are above 100K servers. */
// useOptimalLargeBotSharding: boolean;
// /** Whether or not to automatically reshard. */
// reshard: boolean;
// /** The percentage at which resharding should occur. */
// reshardPercentage: number;
// /** Handles resharding the bot when necessary. */
// resharder: typeof resharder;
// /** Handles checking if all new shards are online in the new gateway. */
// isPending: typeof resharderIsPending;
// /** Handles closing all shards in the old gateway. */
// closeOldShards: typeof resharderCloseOldShards;
// /** Handler to mark a guild id with its new shard id in cache. */
// markNewGuildShardId: typeof markNewGuildShardId;
// /** Handler to update all guilds in cache with the new shard id. */
// editGuildShardIds: typeof reshardingEditGuildShardIds;
// };
/** Calculates the number of shards to use based on the max concurrency */
calculateTotalShards: typeof calculateTotalShards;
/** Calculate the id of the worker related ot this Shard. */
calculateWorkerId: typeof calculateWorkerId;
}
export type GatewayDebugEvents =
    | "GW ERROR"
    | "GW CLOSED"
    | "GW CLOSED_RECONNECT"
    | "GW RAW"
    | "GW RECONNECT"
    | "GW INVALID_SESSION"
    | "GW RESUMED"
    | "GW RESUMING"
    | "GW IDENTIFYING"
    | "GW RAW_SEND"
    | "GW MAX REQUESTS"
    | "GW DEBUG"
    | "GW HEARTBEATING"
    | "GW HEARTBEATING_STARTED"
    | "GW HEARTBEATING_DETAILS"
    | "GW HEARTBEATING_CLOSED";


@ -2,46 +2,46 @@ import { createLeakyBucket } from "../../util/bucket.ts";
import { GatewayManager } from "./gatewayManager.ts";

export function prepareBuckets(gateway: GatewayManager) {
    for (let i = 0; i < gateway.gatewayBot.sessionStartLimit.maxConcurrency; ++i) {
        gateway.buckets.set(i, {
            workers: [],
            leak: createLeakyBucket({
                max: 1,
                refillAmount: 1,
                // special number which is proven to be working dont change
                refillInterval: gateway.spawnShardDelay,
            }),
        });
    }

    // ORGANIZE ALL SHARDS INTO THEIR OWN BUCKETS
    for (let shardId = gateway.firstShardId; shardId <= gateway.lastShardId; ++shardId) {
        // gateway.debug("GW DEBUG", `1. Running for loop in spawnShards function for shardId ${i}.`);
        if (shardId >= gateway.manager.totalShards) {
            throw new Error(
                `Shard (id: ${shardId}) is bigger or equal to the used amount of used shards which is ${gateway.manager.totalShards}`,
            );
        }

        const bucketId = shardId % gateway.gatewayBot.sessionStartLimit.maxConcurrency;
        const bucket = gateway.buckets.get(bucketId);
        if (!bucket) {
            throw new Error(
                `Shard (id: ${shardId}) got assigned to an illegal bucket id: ${bucketId}, expected a bucket id between 0 and ${
                    gateway.gatewayBot.sessionStartLimit.maxConcurrency - 1
                }`,
            );
        }

        // FIND A QUEUE IN THIS BUCKET THAT HAS SPACE
        // const worker = bucket.workers.find((w) => w.queue.length < gateway.shardsPerWorker);
        const workerId = gateway.calculateWorkerId(shardId);
        const worker = bucket.workers.find((w) => w.id === workerId);

        if (worker) {
            // IF THE QUEUE HAS SPACE JUST ADD IT TO THIS QUEUE
            worker.queue.push(shardId);
        } else {
            bucket.workers.push({ id: workerId, queue: [shardId] });
        }
    }
}
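To make the bucket math concrete (values are ours): with a max concurrency of 2, shard ids are split by `shardId % maxConcurrency`, so even ids identify through bucket 0 and odd ids through bucket 1, each throttled by its own leaky bucket.

```ts
// Sketch: bucket assignment for maxConcurrency = 2 (illustrative values).
const maxConcurrency = 2;
for (let shardId = 0; shardId <= 5; ++shardId) {
    console.log(`shard ${shardId} -> bucket ${shardId % maxConcurrency}`);
}
// shard 0 -> bucket 0, shard 1 -> bucket 1, shard 2 -> bucket 0, ...
```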


@ -4,56 +4,56 @@ import { createGatewayManager, GatewayManager } from "./gatewayManager.ts";
export type Resharder = ReturnType<typeof activateResharder>;

export function activateResharder(options: ActivateResharderOptions) {
    const resharder = {
        // ----------
        // PROPERTIES
        // ----------
        /** Interval in milliseconds of when to check whether it's time to reshard.
         *
         * @default 28800000 (8 hours)
         */
        checkInterval: options.checkInterval || 28800000,
        /** Gateway manager which is currently processing all shards and events. */
        gateway: options.gatewayManager,
        /** Timeout of the reshard checker. */
        intervalId: undefined as number | undefined,
        /** Percentage at which resharding should occur.
         * @default 80
         */
        percentage: options.percentage ?? 80,
        /** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
        useOptimalLargeBotSharding: options.useOptimalLargeBotSharding ?? true,
        // ----------
        // METHODS
        // ----------
        /** Activate the resharder and delay the next reshard check. */
        activate: function () {
            return activate(this);
        },
        /** Function which is used to fetch the current gateway information of the bot.
         * This function is mainly used by the reshard checker.
         */
        getGatewayBot: options.getGatewayBot,
        /** Reshard the bots gateway. */
        reshard: function (gatewayBot: GatewayBot) {
            return reshard(this, gatewayBot);
        },
        tellWorkerToPrepare: options.tellWorkerToPrepare,
    };

    resharder.activate();

    return resharder;
}

// /** The methods related to resharding. */
@ -85,106 +85,106 @@ export function activateResharder(options: ActivateResharderOptions) {
// },
export interface ActivateResharderOptions {
    /** Interval in milliseconds of when to check whether it's time to reshard.
     *
     * @default 28800000 (8 hours)
     */
    checkInterval?: number;
    /** Gateway manager which the resharder should be bound to. */
    gatewayManager: GatewayManager;
    /** Percentage at which resharding should occur.
     * @default 80
     */
    percentage?: number;
    /** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
    useOptimalLargeBotSharding?: boolean;

    /** Function which can be used to fetch the current gateway information of the bot.
     * This function is mainly used by the reshard checker.
     */
    getGatewayBot(): Promise<GatewayBot>;

    /** Function which is used to tell a Worker that it should identify a resharder Shard to the gateway and wait for further instructions.
     * The worker should **NOT** process any events coming from this Shard.
     */
    tellWorkerToPrepare(
        gatewayManager: GatewayManager,
        workerId: number,
        shardId: number,
        bucketId: number,
    ): Promise<void>;
}

/** Handler that by default will check to see if resharding should occur. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
export function activate(resharder: Resharder): void {
    if (resharder.intervalId !== undefined) {
        throw new Error("[RESHARDER] Cannot activate the resharder more than one time.");
    }

    resharder.intervalId = setInterval(async () => {
        // gateway.debug("GW DEBUG", "[Resharding] Checking if resharding is needed.");

        // TODO: is it possible to route this to REST?
        const result = await resharder.getGatewayBot();

        const percentage =
            ((result.shards - resharder.gateway.manager.totalShards) / resharder.gateway.manager.totalShards) * 100;
        // Less than necessary% being used so do nothing
        if (percentage < resharder.percentage) return;

        // Don't have enough identify rate limits to reshard
        if (result.sessionStartLimit.remaining < result.shards) return;

        // MULTI-SERVER BOTS OVERRIDE THIS IF YOU NEED TO RESHARD SERVER BY SERVER
        return resharder.reshard(result);
    }, resharder.checkInterval);
}

export async function reshard(resharder: Resharder, gatewayBot: GatewayBot) {
    // oldGateway.debug("GW DEBUG", "[Resharding] Starting the reshard process.");

    // Create a temporary gateway manager for easier handling.
    const tmpManager = createGatewayManager({
        gatewayBot: gatewayBot,
        gatewayConfig: resharder.gateway.manager.gatewayConfig,
        handleDiscordPayload: () => {},
        tellWorkerToIdentify: resharder.tellWorkerToPrepare,
    });

    // Begin resharding
    // If more than 100K servers, begin switching to 16x sharding
    if (resharder.useOptimalLargeBotSharding) {
        // gateway.debug("GW DEBUG", "[Resharding] Using optimal large bot sharding solution.");
        tmpManager.manager.totalShards = resharder.gateway.calculateTotalShards(resharder.gateway);
    }

    tmpManager.spawnShards(tmpManager);

    return new Promise((resolve) => {
        // TIMER TO KEEP CHECKING WHEN ALL SHARDS HAVE RESHARDED
        const timer = setInterval(async () => {
            const pending = await gateway.resharding.isPending(gateway, oldGateway);
            // STILL PENDING ON SOME SHARDS TO BE CREATED
            if (pending) return;

            // ENABLE EVENTS ON NEW SHARDS AND IGNORE EVENTS ON OLD
            const oldHandler = oldGateway.handleDiscordPayload;
            gateway.handleDiscordPayload = oldHandler;
            oldGateway.handleDiscordPayload = function (og, data, shardId) {
                // ALLOW EXCEPTION FOR CHUNKING TO PREVENT REQUESTS FREEZING
                if (data.t !== "GUILD_MEMBERS_CHUNK") return;

                oldHandler(og, data, shardId);
            };

            // STOP TIMER
            clearInterval(timer);

            await gateway.resharding.editGuildShardIds();
            await gateway.resharding.closeOldShards(oldGateway);
            gateway.debug("GW DEBUG", "[Resharding] Complete.");
            resolve(gateway);
        }, 30000);
    }) as Promise<GatewayManager>;
}

// /** The handler to automatically reshard when necessary. */
@ -269,41 +269,41 @@ export async function reshard(resharder: Resharder, gatewayBot: GatewayBot) {
/** Handler that by default will check all new shards are online in the new gateway. The handler can be overridden if you have multiple servers to communicate through redis pubsub or whatever you prefer. */
export async function resharderIsPending(
    gateway: GatewayManager,
    oldGateway: GatewayManager,
) {
    for (let i = gateway.firstShardId; i < gateway.lastShardId; i++) {
        const shard = gateway.shards.get(i);
        if (!shard?.ready) {
            return true;
        }
    }

    return false;
}

/** Handler that by default closes all shards in the old gateway. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
export async function resharderCloseOldShards(oldGateway: GatewayManager) {
    // SHUT DOWN ALL SHARDS IF NOTHING IN QUEUE
    oldGateway.shards.forEach((shard) => {
        // CLOSE THIS SHARD IT HAS NO QUEUE
        if (!shard.processingQueue && !shard.queue.length) {
            return oldGateway.closeWS(
                shard.ws,
                3066,
                "Shard has been resharded. Closing shard since it has no queue.",
            );
        }

        // IF QUEUE EXISTS GIVE IT 5 MINUTES TO COMPLETE
        setTimeout(() => {
            oldGateway.closeWS(
                shard.ws,
                3066,
                "Shard has been resharded. Delayed closing shard since it had a queue.",
            );
        }, 300000);
    });
}

// /** Handler that by default will check to see if resharding should occur. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
@ -330,10 +330,10 @@ export async function resharderCloseOldShards(oldGateway: GatewayManager) {
/** Handler that by default will save the new shard id for each guild this becomes ready in new gateway. This can be overridden to save the shard ids in a redis cache layer or whatever you prefer. These ids will be used later to update all guilds. */
export async function markNewGuildShardId(guildIds: bigint[], shardId: number) {
    // PLACEHOLDER TO LET YOU MARK A GUILD ID AND SHARD ID FOR LATER USE ONCE RESHARDED
}

/** Handler that by default does not do anything since by default the library will not cache. */
export async function reshardingEditGuildShardIds() {
    // PLACEHOLDER TO LET YOU UPDATE CACHED GUILDS
}


@ -17,106 +17,106 @@ export type ShardManager = ReturnType<typeof createShardManager>;
 * The aim of this is to provide an easy to use manager which can be used by workers or any other kind of separate process.
 */
export function createShardManager(options: CreateShardManager) {
    return {
// ----------
// PROPERTIES
// ----------
/** Options which are used to create a new Shard. */
createShardOptions: {
...options.createShardOptions,
events: {
...options.createShardOptions?.events,
message: options.createShardOptions?.events?.message ?? options.handleMessage,
},
},
/** Gateway configuration which is used when creating a Shard. */
gatewayConfig: options.gatewayConfig,
/** Managed Shards. */
shards: new Collection(
options.shardIds.map((shardId) => {
const shard = createShard({
...options.createShardOptions,
id: shardId,
totalShards: options.totalShards,
gatewayConfig: options.gatewayConfig,
requestIdentify: async function () {
return await options.requestIdentify(shardId);
},
});
return [shardId, shard] as const;
}),
),
/** Total amount of Shards used by the bot. */
totalShards: options.totalShards,
// ----------
// METHODS
// ----------
/** Tell the manager to identify a Shard.
* If this Shard is not already managed this will also add the Shard to the manager.
*/
identify: async function (shardId: number) {
let shard = this.shards.get(shardId);
if (!shard) {
shard = createShard({
...this.createShardOptions,
id: shardId,
totalShards: this.totalShards,
gatewayConfig: this.gatewayConfig,
requestIdentify: async function () {
return await options.requestIdentify(shardId);
},
});
this.shards.set(shardId, shard);
}
return await shard.identify();
},
/** Kill a shard.
* Close a shards connection to Discord's gateway (if any) and remove it from the manager.
*/
kill: async function (shardId: number) {
const shard = this.shards.get(shardId);
if (!shard) return;
this.shards.delete(shardId);
return await shard.shutdown();
},
/** This function communicates with the parent manager,
* in order to know whether this manager is allowed to identify a new shard.
*/
requestIdentify: options.requestIdentify,
};
}
export interface CreateShardManager {
// ---------- // ----------
// PROPERTIES // PROPERTIES
// ---------- // ----------
/** Options which are used to create a new Shard. */ /** Options which are used to create a new Shard. */
createShardOptions: { createShardOptions?: Omit<CreateShard, "id" | "totalShards" | "requestIdentify" | "gatewayConfig">;
...options.createShardOptions,
events: {
...options.createShardOptions?.events,
message: options.createShardOptions?.events?.message ?? options.handleMessage,
},
},
/** Gateway configuration which is used when creating a Shard. */ /** Gateway configuration which is used when creating a Shard. */
gatewayConfig: options.gatewayConfig, gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
/** Managed Shards. */ /** Ids of the Shards which should be managed. */
shards: new Collection( shardIds: number[];
options.shardIds.map((shardId) => { /** Total amount of Shards used by the bot. */
const shard = createShard({ totalShards: number;
...options.createShardOptions,
id: shardId,
totalShards: options.totalShards,
gatewayConfig: options.gatewayConfig,
requestIdentify: async function () {
return await options.requestIdentify(shardId);
},
});
return [shardId, shard] as const;
}),
),
/** Total amount of Shards used by the bot. */
totalShards: options.totalShards,
// ---------- // ----------
// METHODS // METHODS
// ---------- // ----------
/** Tell the manager to identify a Shard. /** This function is used when a shard receives any message from Discord. */
* If this Shard is not already managed this will also add the Shard to the manager. handleMessage(shard: Shard, message: DiscordGatewayPayload): unknown;
*/
identify: async function (shardId: number) {
let shard = this.shards.get(shardId);
if (!shard) {
shard = createShard({
...this.createShardOptions,
id: shardId,
totalShards: this.totalShards,
gatewayConfig: this.gatewayConfig,
requestIdentify: async function () {
return await options.requestIdentify(shardId);
},
});
this.shards.set(shardId, shard);
}
return await shard.identify();
},
/** Kill a shard.
* Close a shards connection to Discord's gateway (if any) and remove it from the manager.
*/
kill: async function (shardId: number) {
const shard = this.shards.get(shardId);
if (!shard) return;
this.shards.delete(shardId);
return await shard.shutdown();
},
/** This function communicates with the parent manager, /** This function communicates with the parent manager,
* in order to know whether this manager is allowed to identify a new shard. * in order to know whether this manager is allowed to identify a new shard.
*/ */
requestIdentify: options.requestIdentify, requestIdentify(shardId: number): Promise<void>;
};
}
export interface CreateShardManager {
// ----------
// PROPERTIES
// ----------
/** Options which are used to create a new Shard. */
createShardOptions?: Omit<CreateShard, "id" | "totalShards" | "requestIdentify" | "gatewayConfig">;
/** Gateway configuration which is used when creating a Shard. */
gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
/** Ids of the Shards which should be managed. */
shardIds: number[];
/** Total amount of Shards used by the bot. */
totalShards: number;
// ----------
// METHODS
// ----------
/** This function is used when a shard receives any message from Discord. */
handleMessage(shard: Shard, message: DiscordGatewayPayload): unknown;
/** This function communicates with the parent manager,
* in order to know whether this manager is allowed to identify a new shard.
*/
requestIdentify(shardId: number): Promise<void>;
} }
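A minimal usage sketch of the manager, e.g. from inside a worker process; the token is a placeholder and requestIdentify would normally ask the orchestrating process for permission:

const manager = createShardManager({
    gatewayConfig: { token: "YOUR_BOT_TOKEN" }, // placeholder token
    shardIds: [0, 1],
    totalShards: 2,
    handleMessage(shard, message) {
        // Forward every gateway payload to your own event handling.
        console.log(`Shard ${shard.id} received ${message.t}`);
    },
    async requestIdentify(_shardId) {
        // Ask the parent process for an identify slot here; resolving means "go ahead".
    },
});

await manager.identify(0);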


@ -6,27 +6,27 @@ import { createGatewayManager, GatewayManager } from "./gatewayManager.ts";
/** Begin spawning shards. */ /** Begin spawning shards. */
export function spawnShards(gateway: GatewayManager) { export function spawnShards(gateway: GatewayManager) {
// PREPARES THE MAX SHARD COUNT BY CONCURRENCY // PREPARES THE MAX SHARD COUNT BY CONCURRENCY
// if (manager.resharding.useOptimalLargeBotSharding) { // if (manager.resharding.useOptimalLargeBotSharding) {
// // gateway.debug("GW DEBUG", "[Spawning] Using optimal large bot sharding solution."); // // gateway.debug("GW DEBUG", "[Spawning] Using optimal large bot sharding solution.");
// manager.manager.totalShards = manager.calculateTotalShards( // manager.manager.totalShards = manager.calculateTotalShards(
// manager, // manager,
// ); // );
// } // }
// PREPARES ALL SHARDS IN SPECIFIC BUCKETS // PREPARES ALL SHARDS IN SPECIFIC BUCKETS
gateway.prepareBuckets(); gateway.prepareBuckets();
// SPREAD THIS OUT TO DIFFERENT WORKERS TO BEGIN STARTING UP // SPREAD THIS OUT TO DIFFERENT WORKERS TO BEGIN STARTING UP
gateway.buckets.forEach(async (bucket, bucketId) => { gateway.buckets.forEach(async (bucket, bucketId) => {
// gateway.debug("GW DEBUG", `2. Running forEach loop in spawnShards function.`); // gateway.debug("GW DEBUG", `2. Running forEach loop in spawnShards function.`);
for (const worker of bucket.workers) { for (const worker of bucket.workers) {
// gateway.debug("GW DEBUG", `3. Running for of loop in spawnShards function.`); // gateway.debug("GW DEBUG", `3. Running for of loop in spawnShards function.`);
for (const shardId of worker.queue) { for (const shardId of worker.queue) {
await gateway.tellWorkerToIdentify(worker.id, shardId, bucketId); await gateway.tellWorkerToIdentify(worker.id, shardId, bucketId);
} }
} }
}); });
} }


@ -2,7 +2,7 @@ import { delay } from "../../util/delay.ts";
import { GatewayManager } from "./gatewayManager.ts"; import { GatewayManager } from "./gatewayManager.ts";
export async function stop(gateway: GatewayManager, code: number, reason: string) { export async function stop(gateway: GatewayManager, code: number, reason: string) {
gateway.manager.shards.forEach((shard) => shard.close(code, reason)); gateway.manager.shards.forEach((shard) => shard.close(code, reason));
await delay(5000); await delay(5000);
} }


@ -4,10 +4,10 @@ import { GatewayManager } from "./gatewayManager.ts";
/** Allows users to hook in and change how to communicate with different workers across different servers, or anything they like. For example using redis pubsub to talk to other servers. */ /** Allows users to hook in and change how to communicate with different workers across different servers, or anything they like. For example using redis pubsub to talk to other servers. */
export async function tellWorkerToIdentify( export async function tellWorkerToIdentify(
gateway: GatewayManager, gateway: GatewayManager,
_workerId: number, _workerId: number,
shardId: number, shardId: number,
_bucketId: number, _bucketId: number,
): Promise<void> { ): Promise<void> {
return await gateway.manager.identify(shardId); return await gateway.manager.identify(shardId);
} }
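For instance, a setup that actually runs shards in Web Workers might override this hook so the manager only posts an instruction to the worker that owns the shard; the workers registry and the message shape are assumptions for this sketch:

// Hypothetical registry of spawned workers, keyed by worker id.
const workers = new Map<number, Worker>();

async function tellWorkerToIdentifyViaPostMessage(
    _gateway: GatewayManager,
    workerId: number,
    shardId: number,
    bucketId: number,
): Promise<void> {
    workers.get(workerId)?.postMessage({ type: "IDENTIFY_SHARD", shardId, bucketId });
}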


@ -1,9 +1,9 @@
import { Shard } from "./types.ts"; import { Shard } from "./types.ts";
export function calculateSafeRequests(shard: Shard) { export function calculateSafeRequests(shard: Shard) {
// * 2 adds an extra safety layer for Discord's OP 1 requests that we need to respond to // * 2 adds an extra safety layer for Discord's OP 1 requests that we need to respond to
const safeRequests = shard.maxRequestsPerRateLimitTick - const safeRequests = shard.maxRequestsPerRateLimitTick -
Math.ceil(shard.rateLimitResetInterval / shard.heart.interval) * 2; Math.ceil(shard.rateLimitResetInterval / shard.heart.interval) * 2;
return safeRequests < 0 ? 0 : safeRequests; return safeRequests < 0 ? 0 : safeRequests;
} }
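As a worked example, assuming the constants match Discord's documented gateway limit of 120 commands per 60000 ms window and the 45000 ms default heartbeat interval, the shard reserves four requests for heartbeats:

const safe = 120 - Math.ceil(60000 / 45000) * 2; // ceil(1.33) = 2, so 120 - 4
console.log(safe); // 116 safe requests per rate limit window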


@ -1,7 +1,7 @@
import { Shard } from "./types.ts"; import { Shard } from "./types.ts";
export function close(shard: Shard, code: number, reason: string): void { export function close(shard: Shard, code: number, reason: string): void {
if (shard.socket?.readyState !== WebSocket.OPEN) return; if (shard.socket?.readyState !== WebSocket.OPEN) return;
return shard.socket?.close(code, reason); return shard.socket?.close(code, reason);
} }


@ -1,34 +1,34 @@
import { Shard, ShardState } from "./types.ts"; import { Shard, ShardState } from "./types.ts";
export async function connect(shard: Shard): Promise<void> { export async function connect(shard: Shard): Promise<void> {
// Only set the shard to `Connecting` state, // Only set the shard to `Connecting` state,
// if the connection request does not come from an identify or resume action. // if the connection request does not come from an identify or resume action.
if (![ShardState.Identifying, ShardState.Resuming].includes(shard.state)) { if (![ShardState.Identifying, ShardState.Resuming].includes(shard.state)) {
shard.state = ShardState.Connecting; shard.state = ShardState.Connecting;
} }
shard.events.connecting?.(shard); shard.events.connecting?.(shard);
// Explicitly setting the encoding to json, since we do not support ETF. // Explicitly setting the encoding to json, since we do not support ETF.
const socket = new WebSocket(`${shard.gatewayConfig.url}/?v=${shard.gatewayConfig.version}&encoding=json`); const socket = new WebSocket(`${shard.gatewayConfig.url}/?v=${shard.gatewayConfig.version}&encoding=json`);
shard.socket = socket; shard.socket = socket;
// TODO: proper event handling // TODO: proper event handling
socket.onerror = (event) => console.log({ error: event }); socket.onerror = (event) => console.log({ error: event });
socket.onclose = (event) => shard.handleClose(event); socket.onclose = (event) => shard.handleClose(event);
socket.onmessage = (message) => shard.handleMessage(message); socket.onmessage = (message) => shard.handleMessage(message);
return new Promise((resolve) => { return new Promise((resolve) => {
socket.onopen = () => { socket.onopen = () => {
// Only set the shard to `Unidentified` state, // Only set the shard to `Unidentified` state,
// if the connection request does not come from an identify or resume action. // if the connection request does not come from an identify or resume action.
if (![ShardState.Identifying, ShardState.Resuming].includes(shard.state)) { if (![ShardState.Identifying, ShardState.Resuming].includes(shard.state)) {
shard.state = ShardState.Unidentified; shard.state = ShardState.Unidentified;
} }
shard.events.connected?.(shard); shard.events.connected?.(shard);
resolve(); resolve();
}; };
}); });
} }


@ -1,16 +1,16 @@
import { identify } from "./identify.ts"; import { identify } from "./identify.ts";
import { handleMessage } from "./handleMessage.ts"; import { handleMessage } from "./handleMessage.ts";
import { import {
DEFAULT_HEARTBEAT_INTERVAL, DEFAULT_HEARTBEAT_INTERVAL,
GATEWAY_RATE_LIMIT_RESET_INTERVAL, GATEWAY_RATE_LIMIT_RESET_INTERVAL,
MAX_GATEWAY_REQUESTS_PER_INTERVAL, MAX_GATEWAY_REQUESTS_PER_INTERVAL,
Shard, Shard,
ShardEvents, ShardEvents,
ShardGatewayConfig, ShardGatewayConfig,
ShardHeart, ShardHeart,
ShardSocketCloseCodes, ShardSocketCloseCodes,
ShardSocketRequest, ShardSocketRequest,
ShardState, ShardState,
} from "./types.ts"; } from "./types.ts";
import { startHeartbeating } from "./startHeartbeating.ts"; import { startHeartbeating } from "./startHeartbeating.ts";
import { stopHeartbeating } from "./stopHeartbeating.ts"; import { stopHeartbeating } from "./stopHeartbeating.ts";
@ -33,301 +33,301 @@ import { API_VERSION } from "../../util/constants.ts";
/** */ /** */
export function createShard( export function createShard(
options: CreateShard, options: CreateShard,
) { ) {
// This is done for performance reasons // This is done for performance reasons
const calculateSafeRequestsOverwritten = options.calculateSafeRequests ?? calculateSafeRequests; const calculateSafeRequestsOverwritten = options.calculateSafeRequests ?? calculateSafeRequests;
const closeOverwritten = options.close ?? close; const closeOverwritten = options.close ?? close;
const connectOverwritten = options.connect ?? connect; const connectOverwritten = options.connect ?? connect;
const identifyOverwritten = options.identify ?? identify; const identifyOverwritten = options.identify ?? identify;
const sendOverwritten = options.send ?? send; const sendOverwritten = options.send ?? send;
const shutdownOverwritten = options.shutdown ?? shutdown; const shutdownOverwritten = options.shutdown ?? shutdown;
const resumeOverwritten = options.resume ?? resume; const resumeOverwritten = options.resume ?? resume;
const handleCloseOverwritten = options.handleClose ?? handleClose; const handleCloseOverwritten = options.handleClose ?? handleClose;
const handleMessageOverwritten = options.handleMessage ?? handleMessage; const handleMessageOverwritten = options.handleMessage ?? handleMessage;
const isOpenOverwritten = options.isOpen ?? isOpen; const isOpenOverwritten = options.isOpen ?? isOpen;
const startHeartbeatingOverwritten = options.startHeartbeating ?? startHeartbeating; const startHeartbeatingOverwritten = options.startHeartbeating ?? startHeartbeating;
const stopHeartbeatingOverwritten = options.stopHeartbeating ?? stopHeartbeating; const stopHeartbeatingOverwritten = options.stopHeartbeating ?? stopHeartbeating;
return { return {
// ---------- // ----------
// PROPERTIES // PROPERTIES
// ---------- // ----------
/** The gateway configuration which is used to connect to Discord. */
gatewayConfig: {
compress: options.gatewayConfig.compress ?? false,
intents: options.gatewayConfig.intents ?? 0,
properties: {
os: options.gatewayConfig?.properties?.os ?? Deno.build.os,
browser: options.gatewayConfig?.properties?.browser ?? "Discordeno",
device: options.gatewayConfig?.properties?.device ?? "Discordeno",
},
token: options.gatewayConfig.token,
url: options.gatewayConfig.url ?? "wss://gateway.discord.gg",
version: options.gatewayConfig.version ?? API_VERSION,
} as ShardGatewayConfig,
/** This contains all the heartbeat information */
heart: {
acknowledged: false,
interval: DEFAULT_HEARTBEAT_INTERVAL,
} as ShardHeart,
/** Id of the shard. */
id: options.id,
/** The maximum number of requests which can be sent to Discord per rate limit tick.
* Typically this value should not be changed.
*/
maxRequestsPerRateLimitTick: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
/** The previous payload sequence number. */
previousSequenceNumber: options.previousSequenceNumber || null,
/** In which interval (in milliseconds) the gateway resets its rate limit. */
rateLimitResetInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
/** Current session id of the shard if present. */
sessionId: undefined as string | undefined,
/** This contains the WebSocket connection to Discord, if currently connected. */
socket: undefined as WebSocket | undefined,
/** Current internal state of the shard. */
state: ShardState.Offline,
/** The total amount of shards which are used to communicate with Discord. */
totalShards: options.totalShards,
// ----------
// METHODS
// ----------
/** The shard related event handlers. */
events: options.events ?? {} as ShardEvents,
/** Calculate the amount of requests which can safely be made per rate limit interval,
* before the gateway gets disconnected due to an exceeded rate limit.
*/
calculateSafeRequests: function () {
return calculateSafeRequestsOverwritten(this);
},
/** Close the socket connection to discord if present. */
close: function (code: number, reason: string) {
return closeOverwritten(this, code, reason);
},
/** Connect the shard with the gateway and start heartbeating.
* This will not identify the shard to the gateway.
*/
connect: async function () {
return await connectOverwritten(this);
},
/** Identify the shard to the gateway.
* If not connected, this will also connect the shard to the gateway.
*/
identify: async function () {
return await identifyOverwritten(this);
},
/** Check whether the connection to Discord is currently open. */
isOpen: function () {
return isOpenOverwritten(this);
},
/** Function which can be overwritten in order to get the shards presence. */
// This function is allowed to be async, in case the devs create the presence based on e.g. database values.
// Passing the shard's id there to make it easier for the dev to use this function.
makePresence: options.makePresence,
/** Attempt to resume the previous shards session with the gateway. */
resume: async function () {
return await resumeOverwritten(this);
},
/** Send a message to Discord.
* @param {boolean} [highPriority=false] - Whether this message should be sent asap.
*/
send: async function (message: ShardSocketRequest, highPriority: boolean = false) {
return await sendOverwritten(this, message, highPriority);
},
/** Shutdown the shard.
* Forcefully disconnect the shard from Discord.
* The shard may not attempt to reconnect with Discord.
*/
shutdown: async function () {
return await shutdownOverwritten(this);
},
/** @private Internal shard bucket.
* Only access this if you know what you are doing.
*
* Bucket for handling shard request rate limits.
*/
bucket: createLeakyBucket({
max: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
refillAmount: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
}),
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Handle a gateway connection close.
*/
handleClose: async function (close: CloseEvent) {
return await handleCloseOverwritten(this, close);
},
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Handle an incoming gateway message.
*/
handleMessage: async function (message: MessageEvent<any>) {
return await handleMessageOverwritten(this, message);
},
/** This function communicates with the management process, in order to know whether it's free to identify. */
requestIdentify: async function () {
return await options.requestIdentify(this.id);
},
/** @private Internal state.
* Only use this if you know what you are doing.
*
* Cache for pending gateway requests which should have been sent while the gateway went offline.
*/
offlineSendQueue: [] as ((_?: unknown) => void)[],
/** @private Internal shard map.
* Only use this map if you know what you are doing.
*
* This is used to resolve internal waiting states.
* Mapped by SelectedEvents => ResolveFunction
*/
resolves: new Map<"READY" | "RESUMED" | "INVALID_SESSION", (payload: DiscordGatewayPayload) => void>(),
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Start sending heartbeat payloads to Discord in the provided interval.
*/
startHeartbeating: function (interval: number) {
return startHeartbeatingOverwritten(this, interval);
},
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Stop the heartbeating process with discord.
*/
stopHeartbeating: function () {
return stopHeartbeatingOverwritten(this);
},
};
}
export interface CreateShard {
/** Id of the shard which should be created. */
id: number;
/** Gateway configuration for the shard. */
gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
/** The gateway configuration which is used to connect to Discord. */
gatewayConfig: {
compress: options.gatewayConfig.compress ?? false,
intents: options.gatewayConfig.intents ?? 0,
properties: {
os: options.gatewayConfig?.properties?.os ?? Deno.build.os,
browser: options.gatewayConfig?.properties?.browser ?? "Discordeno",
device: options.gatewayConfig?.properties?.device ?? "Discordeno",
},
token: options.gatewayConfig.token,
url: options.gatewayConfig.url ?? "wss://gateway.discord.gg",
version: options.gatewayConfig.version ?? API_VERSION,
} as ShardGatewayConfig,
/** This contains all the heartbeat information */
heart: {
acknowledged: false,
interval: DEFAULT_HEARTBEAT_INTERVAL,
} as ShardHeart,
/** Id of the shard. */
id: options.id,
/** The maximum number of requests which can be sent to Discord per rate limit tick.
* Typically this value should not be changed.
*/
maxRequestsPerRateLimitTick: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
/** The previous payload sequence number. */
previousSequenceNumber: options.previousSequenceNumber || null,
/** In which interval (in milliseconds) the gateway resets its rate limit. */
rateLimitResetInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
/** Current session id of the shard if present. */
sessionId: undefined as string | undefined,
/** This contains the WebSocket connection to Discord, if currently connected. */
socket: undefined as WebSocket | undefined,
/** Current internal state of the shard. */
state: ShardState.Offline,
/** The total amount of shards which are used to communicate with Discord. */ /** The total amount of shards which are used to communicate with Discord. */
totalShards: options.totalShards, totalShards: number;
// ---------- /** This function communicates with the management process, in order to know whether its free to identify.
// METHODS * When this function resolves, this means that the shard is allowed to send an identify payload to discord.
// ---------- */
requestIdentify: (shardId: number) => Promise<void>;
/** The shard related event handlers. */
events: options.events ?? {} as ShardEvents,
/** Calculate the amount of requests which can safely be made per rate limit interval, /** Calculate the amount of requests which can safely be made per rate limit interval,
* before the gateway gets disconnected due to an exceeded rate limit. * before the gateway gets disconnected due to an exceeded rate limit.
*/ */
calculateSafeRequests: function () { calculateSafeRequests?: typeof calculateSafeRequests;
return calculateSafeRequestsOverwritten(this);
},
/** Close the socket connection to discord if present. */ /** Close the socket connection to discord if present. */
close: function (code: number, reason: string) { close?: typeof close;
return closeOverwritten(this, code, reason);
},
/** Connect the shard with the gateway and start heartbeating. /** Connect the shard with the gateway and start heartbeating.
* This will not identify the shard to the gateway. * This will not identify the shard to the gateway.
*/ */
connect: async function () { connect?: typeof connect;
return await connectOverwritten(this);
},
/** Identify the shard to the gateway.
* If not connected, this will also connect the shard to the gateway.
*/
identify: async function () {
return await identifyOverwritten(this);
},
/** Check whether the connection to Discord is currently open. */
isOpen: function () {
return isOpenOverwritten(this);
},
/** Function which can be overwritten in order to get the shards presence. */
// This function allows to be async, in case the devs create the presence based on eg. database values.
// Passing the shard's id there to make it easier for the dev to use this function.
makePresence: options.makePresence,
/** Attempt to resume the previous shards session with the gateway. */
resume: async function () {
return await resumeOverwritten(this);
},
/** Send a message to Discord.
* @param {boolean} [highPriority=false] - Whether this message should be sent asap.
*/
send: async function (message: ShardSocketRequest, highPriority: boolean = false) {
return await sendOverwritten(this, message, highPriority);
},
/** Shutdown the shard.
* Forcefully disconnect the shard from Discord.
* The shard may not attempt to reconnect with Discord.
*/
shutdown: async function () {
return await shutdownOverwritten(this);
},
/** @private Internal shard bucket.
* Only access this if you know what you are doing.
*
* Bucket for handling shard request rate limits.
*/
bucket: createLeakyBucket({
max: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
refillAmount: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
}),
/** @private Internal shard function. /** @private Internal shard function.
* Only use this function if you know what you are doing. * Only use this function if you know what you are doing.
* *
* Handle a gateway connection close. * Handle a gateway connection close.
*/ */
handleClose: async function (close: CloseEvent) { handleClose?: typeof handleClose;
return await handleCloseOverwritten(this, close);
},
/** @private Internal shard function. /** @private Internal shard function.
* Only use this function if you know what you are doing. * Only use this function if you know what you are doing.
* *
* Handle an incoming gateway message. * Handle an incoming gateway message.
*/ */
handleMessage: async function (message: MessageEvent<any>) { handleMessage?: typeof handleMessage;
return await handleMessageOverwritten(this, message);
},
/** This function communicates with the management process, in order to know whether its free to identify. */ /** Identify the shard to the gateway.
requestIdentify: async function () { * If not connected, this will also connect the shard to the gateway.
return await options.requestIdentify(this.id);
},
/** @private Internal state.
* Only use this if you know what you are doing.
*
* Cache for pending gateway requests which should have been sent while the gateway went offline.
*/ */
offlineSendQueue: [] as ((_?: unknown) => void)[], identify?: typeof identify;
/** @private Internal shard map. /** Check whether the connection to Discord is currently open. */
* Only use this map if you know what you are doing. isOpen?: typeof isOpen;
*
* This is used to resolve internal waiting states. /** Function which can be overwritten in order to get the shards presence. */
* Mapped by SelectedEvents => ResolveFunction makePresence?(shardId: number): Promise<DiscordStatusUpdate> | DiscordStatusUpdate;
/** The maximum number of requests which can be sent to Discord per rate limit tick.
* Typically this value should not be changed.
*/ */
resolves: new Map<"READY" | "RESUMED" | "INVALID_SESSION", (payload: DiscordGatewayPayload) => void>(), maxRequestsPerRateLimitTick?: number;
/** The previous payload sequence number. */
previousSequenceNumber?: number;
/** In which interval (in milliseconds) the gateway resets its rate limit. */
rateLimitResetInterval?: number;
/** Attempt to resume the previous shards session with the gateway. */
resume?: typeof resume;
/** Send a message to Discord.
* @param {boolean} [highPriority=false] - Whether this message should be sent asap.
*/
send?: typeof send;
/** Shutdown the shard.
* Forcefully disconnect the shard from Discord.
* The shard may not attempt to reconnect with Discord.
*/
shutdown?: typeof shutdown;
/** @private Internal shard function. /** @private Internal shard function.
* Only use this function if you know what you are doing. * Only use this function if you know what you are doing.
* *
* Start sending heartbeat payloads to Discord in the provided interval. * Start sending heartbeat payloads to Discord in the provided interval.
*/ */
startHeartbeating: function (interval: number) { startHeartbeating?: typeof startHeartbeating;
return startHeartbeatingOverwritten(this, interval);
}, /** Current internal state of the shard. */
state?: ShardState;
/** @private Internal shard function. /** @private Internal shard function.
* Only use this function if you know what you are doing. * Only use this function if you know what you are doing.
* *
* Stop the heartbeating process with discord. * Stop the heartbeating process with discord.
*/ */
stopHeartbeating: function () { stopHeartbeating?: typeof stopHeartbeating;
return stopHeartbeatingOverwritten(this);
}, /** The shard related event handlers. */
}; events?: ShardEvents;
} /** This contains all the heartbeat information */
heart?: ShardHeart;
export interface CreateShard { /** Bucket for handling shard request rate limits. */
/** Id of the shard which should be created. */ bucket?: LeakyBucket;
id: number; /** Cache for pending gateway requests which should have been sent while the gateway went offline. */
offlineSendQueue?: ShardSocketRequest[];
/** Gateway configuration for the shard. */ /** This is used to resolve internal waiting states.
gatewayConfig: PickPartial<ShardGatewayConfig, "token">; * Mapped by SelectedEvents => ResolveFunction
*/
/** The total amount of shards which are used to communicate with Discord. */ resolves?: Shard["resolves"];
totalShards: number;
/** This function communicates with the management process, in order to know whether it's free to identify.
* When this function resolves, this means that the shard is allowed to send an identify payload to discord.
*/
requestIdentify: (shardId: number) => Promise<void>;
/** Calculate the amount of requests which can safely be made per rate limit interval,
* before the gateway gets disconnected due to an exceeded rate limit.
*/
calculateSafeRequests?: typeof calculateSafeRequests;
/** Close the socket connection to discord if present. */
close?: typeof close;
/** Connect the shard with the gateway and start heartbeating.
* This will not identify the shard to the gateway.
*/
connect?: typeof connect;
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Handle a gateway connection close.
*/
handleClose?: typeof handleClose;
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Handle an incoming gateway message.
*/
handleMessage?: typeof handleMessage;
/** Identify the shard to the gateway.
* If not connected, this will also connect the shard to the gateway.
*/
identify?: typeof identify;
/** Check whether the connection to Discord is currently open. */
isOpen?: typeof isOpen;
/** Function which can be overwritten in order to get the shards presence. */
makePresence?(shardId: number): Promise<DiscordStatusUpdate> | DiscordStatusUpdate;
/** The maximum number of requests which can be sent to Discord per rate limit tick.
* Typically this value should not be changed.
*/
maxRequestsPerRateLimitTick?: number;
/** The previous payload sequence number. */
previousSequenceNumber?: number;
/** In which interval (in milliseconds) the gateway resets its rate limit. */
rateLimitResetInterval?: number;
/** Attempt to resume the previous shards session with the gateway. */
resume?: typeof resume;
/** Send a message to Discord.
* @param {boolean} [highPriority=false] - Whether this message should be sent asap.
*/
send?: typeof send;
/** Shutdown the shard.
* Forcefully disconnect the shard from Discord.
* The shard may not attempt to reconnect with Discord.
*/
shutdown?: typeof shutdown;
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Start sending heartbeat payloads to Discord in the provided interval.
*/
startHeartbeating?: typeof startHeartbeating;
/** Current internal state of the shard. */
state?: ShardState;
/** @private Internal shard function.
* Only use this function if you know what you are doing.
*
* Stop the heartbeating process with discord.
*/
stopHeartbeating?: typeof stopHeartbeating;
/** The shard related event handlers. */
events?: ShardEvents;
/** This contains all the heartbeat information */
heart?: ShardHeart;
/** Bucket for handling shard request rate limits. */
bucket?: LeakyBucket;
/** Cache for pending gateway requests which should have been sent while the gateway went offline. */
offlineSendQueue?: ShardSocketRequest[];
/** This is used to resolve internal waiting states.
* Mapped by SelectedEvents => ResolveFunction
*/
resolves?: Shard["resolves"];
} }
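Putting the required options together, a minimal single-shard setup could look roughly like this; the token is a placeholder and the always-resolving requestIdentify only makes sense when there is nothing to coordinate:

const shard = createShard({
    id: 0,
    totalShards: 1,
    gatewayConfig: {
        token: "YOUR_BOT_TOKEN", // placeholder
        intents: 0,
    },
    // Nothing to coordinate with a single shard, so identifying is always allowed.
    requestIdentify: async () => {},
    events: {
        message(_shard, payload) {
            console.log("Gateway payload:", payload.t);
        },
    },
});

await shard.identify();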


@ -2,62 +2,62 @@ import { GatewayCloseEventCodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts"; import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
export async function handleClose(shard: Shard, close: CloseEvent): Promise<void> { export async function handleClose(shard: Shard, close: CloseEvent): Promise<void> {
// gateway.debug("GW CLOSED", { shardId, payload: event }); // gateway.debug("GW CLOSED", { shardId, payload: event });
shard.stopHeartbeating(); shard.stopHeartbeating();
switch (close.code) { switch (close.code) {
case ShardSocketCloseCodes.TestingFinished: { case ShardSocketCloseCodes.TestingFinished: {
shard.state = ShardState.Offline; shard.state = ShardState.Offline;
shard.events.disconnected?.(shard); shard.events.disconnected?.(shard);
return; return;
}
// On these codes a manual start will be done.
case ShardSocketCloseCodes.Shutdown:
case ShardSocketCloseCodes.ReIdentifying:
case ShardSocketCloseCodes.Resharded:
case ShardSocketCloseCodes.ResumeClosingOldConnection:
case ShardSocketCloseCodes.ZombiedConnection: {
shard.state = ShardState.Disconnected;
shard.events.disconnected?.(shard);
// gateway.debug("GW CLOSED_RECONNECT", { shardId, payload: event });
return;
}
// Gateway connection closes which require a new identify.
case GatewayCloseEventCodes.UnknownOpcode:
case GatewayCloseEventCodes.NotAuthenticated:
case GatewayCloseEventCodes.InvalidSeq:
case GatewayCloseEventCodes.RateLimited:
case GatewayCloseEventCodes.SessionTimedOut: {
shard.state = ShardState.Identifying;
shard.events.disconnected?.(shard);
return await shard.identify();
}
// When these codes are received something went really wrong.
// On those we cannot start a reconnect attempt.
case GatewayCloseEventCodes.AuthenticationFailed:
case GatewayCloseEventCodes.InvalidShard:
case GatewayCloseEventCodes.ShardingRequired:
case GatewayCloseEventCodes.InvalidApiVersion:
case GatewayCloseEventCodes.InvalidIntents:
case GatewayCloseEventCodes.DisallowedIntents: {
shard.state = ShardState.Offline;
shard.events.disconnected?.(shard);
throw new Error(close.reason || "Discord gave no reason! GG! You broke Discord!");
}
// Gateway connection closes on which a resume is allowed.
case GatewayCloseEventCodes.UnknownError:
case GatewayCloseEventCodes.DecodeError:
case GatewayCloseEventCodes.AlreadyAuthenticated:
default: {
shard.state = ShardState.Resuming;
shard.events.disconnected?.(shard);
return await shard.resume();
}
} }
// On these codes a manual start will be done.
case ShardSocketCloseCodes.Shutdown:
case ShardSocketCloseCodes.ReIdentifying:
case ShardSocketCloseCodes.Resharded:
case ShardSocketCloseCodes.ResumeClosingOldConnection:
case ShardSocketCloseCodes.ZombiedConnection: {
shard.state = ShardState.Disconnected;
shard.events.disconnected?.(shard);
// gateway.debug("GW CLOSED_RECONNECT", { shardId, payload: event });
return;
}
// Gateway connection closes which require a new identify.
case GatewayCloseEventCodes.UnknownOpcode:
case GatewayCloseEventCodes.NotAuthenticated:
case GatewayCloseEventCodes.InvalidSeq:
case GatewayCloseEventCodes.RateLimited:
case GatewayCloseEventCodes.SessionTimedOut: {
shard.state = ShardState.Identifying;
shard.events.disconnected?.(shard);
return await shard.identify();
}
// When these codes are received something went really wrong.
// On those we cannot start a reconnect attempt.
case GatewayCloseEventCodes.AuthenticationFailed:
case GatewayCloseEventCodes.InvalidShard:
case GatewayCloseEventCodes.ShardingRequired:
case GatewayCloseEventCodes.InvalidApiVersion:
case GatewayCloseEventCodes.InvalidIntents:
case GatewayCloseEventCodes.DisallowedIntents: {
shard.state = ShardState.Offline;
shard.events.disconnected?.(shard);
throw new Error(close.reason || "Discord gave no reason! GG! You broke Discord!");
}
// Gateway connection closes on which a resume is allowed.
case GatewayCloseEventCodes.UnknownError:
case GatewayCloseEventCodes.DecodeError:
case GatewayCloseEventCodes.AlreadyAuthenticated:
default: {
shard.state = ShardState.Resuming;
shard.events.disconnected?.(shard);
return await shard.resume();
}
}
} }


@ -8,149 +8,149 @@ import { GATEWAY_RATE_LIMIT_RESET_INTERVAL, Shard, ShardState } from "./types.ts
const decoder = new TextDecoder(); const decoder = new TextDecoder();
export async function handleMessage(shard: Shard, message: MessageEvent<any>): Promise<void> { export async function handleMessage(shard: Shard, message: MessageEvent<any>): Promise<void> {
message = message.data; message = message.data;
// If message compression is enabled, // If message compression is enabled,
// Discord might send zlib compressed payloads. // Discord might send zlib compressed payloads.
if (shard.gatewayConfig.compress && message instanceof Blob) { if (shard.gatewayConfig.compress && message instanceof Blob) {
message = decompressWith( message = decompressWith(
new Uint8Array(await message.arrayBuffer()), new Uint8Array(await message.arrayBuffer()),
0, 0,
(slice: Uint8Array) => decoder.decode(slice), (slice: Uint8Array) => decoder.decode(slice),
); );
}
// Safeguard in case decompression failed to make a string.
if (typeof message !== "string") return;
const messageData = JSON.parse(message) as DiscordGatewayPayload;
// gateway.debug("GW RAW", { shardId, payload: messageData });
// TODO: remove
// console.log({ messageData: censor(messageData) });
switch (messageData.op) {
case GatewayOpcodes.Heartbeat: {
// TODO: can this actually happen
if (!shard.isOpen()) return;
shard.heart.lastBeat = Date.now();
// Discord randomly sends this requiring an immediate heartbeat back.
// Using a direct socket.send call here because heartbeat requests are reserved by us.
shard.socket?.send(
JSON.stringify({
op: GatewayOpcodes.Heartbeat,
d: shard.previousSequenceNumber,
}),
);
shard.events.heartbeat?.(shard);
break;
} }
case GatewayOpcodes.Hello: {
const interval = (messageData.d as DiscordHello).heartbeat_interval;
shard.startHeartbeating(interval); // Safeguard in case decompression failed to make a string.
if (typeof message !== "string") return;
if (shard.state !== ShardState.Resuming) { const messageData = JSON.parse(message) as DiscordGatewayPayload;
// HELLO has been sent on a non-resume action. // gateway.debug("GW RAW", { shardId, payload: messageData });
// This means that the shard starts a new session,
// therefore the rate limit interval has been reset too.
shard.bucket = createLeakyBucket({
max: shard.calculateSafeRequests(),
refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
refillAmount: shard.calculateSafeRequests(),
// Waiting acquires should not be lost on a re-identify.
waiting: shard.bucket.waiting,
});
}
shard.events.hello?.(shard); // TODO: remove
// console.log({ messageData: censor(messageData) });
break; switch (messageData.op) {
case GatewayOpcodes.Heartbeat: {
// TODO: can this actually happen
if (!shard.isOpen()) return;
shard.heart.lastBeat = Date.now();
// Discord randomly sends this requiring an immediate heartbeat back.
// Using a direct socket.send call here because heartbeat requests are reserved by us.
shard.socket?.send(
JSON.stringify({
op: GatewayOpcodes.Heartbeat,
d: shard.previousSequenceNumber,
}),
);
shard.events.heartbeat?.(shard);
break;
}
case GatewayOpcodes.Hello: {
const interval = (messageData.d as DiscordHello).heartbeat_interval;
shard.startHeartbeating(interval);
if (shard.state !== ShardState.Resuming) {
// HELLO has been sent on a non-resume action.
// This means that the shard starts a new session,
// therefore the rate limit interval has been reset too.
shard.bucket = createLeakyBucket({
max: shard.calculateSafeRequests(),
refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
refillAmount: shard.calculateSafeRequests(),
// Waiting acquires should not be lost on a re-identify.
waiting: shard.bucket.waiting,
});
}
shard.events.hello?.(shard);
break;
}
case GatewayOpcodes.HeartbeatACK: {
shard.heart.acknowledged = true;
shard.heart.lastAck = Date.now();
// Manually calculating the round trip time for users who need it.
if (shard.heart.lastBeat) {
shard.heart.rtt = shard.heart.lastAck - shard.heart.lastBeat;
}
shard.events.heartbeatAck?.(shard);
break;
}
case GatewayOpcodes.Reconnect: {
// gateway.debug("GW RECONNECT", { shardId });
shard.events.requestedReconnect?.(shard);
await shard.resume();
break;
}
case GatewayOpcodes.InvalidSession: {
// gateway.debug("GW INVALID_SESSION", { shardId, payload: messageData });
const resumable = messageData.d as boolean;
shard.events.invalidSession?.(shard, resumable);
// We need to wait for a random amount of time between 1 and 5
// Reference: https://discord.com/developers/docs/topics/gateway#resuming
await delay(Math.floor((Math.random() * 4 + 1) * 1000));
shard.resolves.get("INVALID_SESSION")?.(messageData);
shard.resolves.delete("INVALID_SESSION");
// When resumable is false we need to re-identify
if (!resumable) {
await shard.identify();
break;
}
// The session is invalid but apparently it is resumable
await shard.resume();
break;
}
} }
case GatewayOpcodes.HeartbeatACK: {
shard.heart.acknowledged = true;
shard.heart.lastAck = Date.now();
// Manually calculating the round trip time for users who need it.
if (shard.heart.lastBeat) {
shard.heart.rtt = shard.heart.lastAck - shard.heart.lastBeat;
}
shard.events.heartbeatAck?.(shard); if (messageData.t === "RESUMED") {
// gateway.debug("GW RESUMED", { shardId });
break; shard.state = ShardState.Connected;
shard.events.resumed?.(shard);
// Continue the requests which have been queued since the shard went offline.
shard.offlineSendQueue.map((resolve) => resolve());
shard.resolves.get("RESUMED")?.(messageData);
shard.resolves.delete("RESUMED");
} // Important for future resumes.
else if (messageData.t === "READY") {
const payload = messageData.d as DiscordReady;
shard.sessionId = payload.session_id;
shard.state = ShardState.Connected;
// Continue the requests which have been queued since the shard went offline.
// Important when this is a re-identify
shard.offlineSendQueue.map((resolve) => resolve());
shard.resolves.get("READY")?.(messageData);
shard.resolves.delete("READY");
} }
case GatewayOpcodes.Reconnect: {
// gateway.debug("GW RECONNECT", { shardId });
shard.events.requestedReconnect?.(shard); // Update the sequence number if it is present
// `s` can be either `null` or a `number`.
await shard.resume(); // In order to prevent update misses when `s` is `0` we check against null.
if (messageData.s !== null) {
break; shard.previousSequenceNumber = messageData.s;
} }
case GatewayOpcodes.InvalidSession: {
// gateway.debug("GW INVALID_SESSION", { shardId, payload: messageData });
const resumable = messageData.d as boolean;
shard.events.invalidSession?.(shard, resumable); // The necessary handling required for the Shards connection has been finished.
// Now the event can be safely forwarded.
// We need to wait for a random amount of time between 1 and 5 shard.events.message?.(shard, messageData);
// Reference: https://discord.com/developers/docs/topics/gateway#resuming
await delay(Math.floor((Math.random() * 4 + 1) * 1000));
shard.resolves.get("INVALID_SESSION")?.(messageData);
shard.resolves.delete("INVALID_SESSION");
// When resumable is false we need to re-identify
if (!resumable) {
await shard.identify();
break;
}
// The session is invalid but apparently it is resumable
await shard.resume();
break;
}
}
if (messageData.t === "RESUMED") {
// gateway.debug("GW RESUMED", { shardId });
shard.state = ShardState.Connected;
shard.events.resumed?.(shard);
// Continue the requests which have been queued since the shard went offline.
shard.offlineSendQueue.map((resolve) => resolve());
shard.resolves.get("RESUMED")?.(messageData);
shard.resolves.delete("RESUMED");
} // Important for future resumes.
else if (messageData.t === "READY") {
const payload = messageData.d as DiscordReady;
shard.sessionId = payload.session_id;
shard.state = ShardState.Connected;
// Continue the requests which have been queued since the shard went offline.
// Important when this is a re-identify
shard.offlineSendQueue.map((resolve) => resolve());
shard.resolves.get("READY")?.(messageData);
shard.resolves.delete("READY");
}
// Update the sequence number if it is present
// `s` can be either `null` or a `number`.
// In order to prevent update misses when `s` is `0` we check against null.
if (messageData.s !== null) {
shard.previousSequenceNumber = messageData.s;
}
// The necessary handling required for the Shards connection has been finished.
// Now the event can be safely forwarded.
shard.events.message?.(shard, messageData);
} }


@ -2,49 +2,49 @@ import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts"; import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
export async function identify(shard: Shard): Promise<void> { export async function identify(shard: Shard): Promise<void> {
// A new identify has been requested even though there is already a connection open. // A new identify has been requested even though there is already a connection open.
// Therefore we need to close the old connection and heartbeating before creating a new one. // Therefore we need to close the old connection and heartbeating before creating a new one.
if (shard.state === ShardState.Connected) { if (shard.state === ShardState.Connected) {
console.log("CLOSING EXISTING SHARD: #" + shard.id); console.log("CLOSING EXISTING SHARD: #" + shard.id);
shard.close(ShardSocketCloseCodes.ReIdentifying, "Re-identifying closure of old connection."); shard.close(ShardSocketCloseCodes.ReIdentifying, "Re-identifying closure of old connection.");
} }
shard.state = ShardState.Identifying; shard.state = ShardState.Identifying;
shard.events.identifying?.(shard); shard.events.identifying?.(shard);
// It is possible that the shard is in Heartbeating state but not identified, // It is possible that the shard is in Heartbeating state but not identified,
// so check whether there is already a gateway connection existing. // so check whether there is already a gateway connection existing.
// If not we need to create one before we identify. // If not we need to create one before we identify.
if (!shard.isOpen()) { if (!shard.isOpen()) {
await shard.connect(); await shard.connect();
} }
// Wait until an identify is free for this shard. // Wait until an identify is free for this shard.
await shard.requestIdentify(); await shard.requestIdentify();
shard.send({ shard.send({
op: GatewayOpcodes.Identify, op: GatewayOpcodes.Identify,
d: { d: {
token: `Bot ${shard.gatewayConfig.token}`, token: `Bot ${shard.gatewayConfig.token}`,
compress: shard.gatewayConfig.compress, compress: shard.gatewayConfig.compress,
properties: shard.gatewayConfig.properties, properties: shard.gatewayConfig.properties,
intents: shard.gatewayConfig.intents, intents: shard.gatewayConfig.intents,
shard: [shard.id, shard.totalShards], shard: [shard.id, shard.totalShards],
presence: await shard.makePresence?.(shard.id), presence: await shard.makePresence?.(shard.id),
}, },
}, true); }, true);
return new Promise((resolve) => { return new Promise((resolve) => {
shard.resolves.set("READY", () => { shard.resolves.set("READY", () => {
shard.events.identified?.(shard); shard.events.identified?.(shard);
resolve(); resolve();
});
// When identifying too fast,
// Discord sends an invalid session payload.
// This can safely be ignored though and the shard starts a new identify action.
shard.resolves.set("INVALID_SESSION", () => {
shard.resolves.delete("READY");
resolve();
});
}); });
// When identifying too fast,
// Discord sends an invalid session payload.
// This can safely be ignored though and the shard starts a new identify action.
shard.resolves.set("INVALID_SESSION", () => {
shard.resolves.delete("READY");
resolve();
});
});
} }


@ -1,5 +1,5 @@
import { Shard } from "./types.ts"; import { Shard } from "./types.ts";
export function isOpen(shard: Shard): boolean { export function isOpen(shard: Shard): boolean {
return shard.socket?.readyState === WebSocket.OPEN; return shard.socket?.readyState === WebSocket.OPEN;
} }


@ -2,47 +2,50 @@ import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts"; import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
export async function resume(shard: Shard): Promise<void> { export async function resume(shard: Shard): Promise<void> {
// gateway.debug("GW RESUMING", { shardId }); // gateway.debug("GW RESUMING", { shardId });
// It has been requested to resume the Shards session. // It has been requested to resume the Shards session.
// It's possible that the shard is still connected with Discord's gateway therefore we need to forcefully close it. // It's possible that the shard is still connected with Discord's gateway therefore we need to forcefully close it.
if (shard.isOpen()) { if (shard.isOpen()) {
shard.close(ShardSocketCloseCodes.ResumeClosingOldConnection, "Reconnecting the shard, closing old connection."); shard.close(
} ShardSocketCloseCodes.ResumeClosingOldConnection,
"Reconnecting the shard, closing old connection.",
);
}
// Shard has never identified, so we cannot resume. // Shard has never identified, so we cannot resume.
if (!shard.sessionId) { if (!shard.sessionId) {
// gateway.debug( // gateway.debug(
// "GW DEBUG", // "GW DEBUG",
// `[Error] Trying to resume a shard (id: ${shardId}) that was not first identified.`, // `[Error] Trying to resume a shard (id: ${shardId}) that was not first identified.`,
// ); // );
return await shard.identify(); return await shard.identify();
// throw new Error(`[SHARD] Trying to resume a shard (id: ${shard.id}) which was never identified`); // throw new Error(`[SHARD] Trying to resume a shard (id: ${shard.id}) which was never identified`);
} }
shard.state = ShardState.Resuming; shard.state = ShardState.Resuming;
// Before we can resume, we need to create a new connection with Discord's gateway. // Before we can resume, we need to create a new connection with Discord's gateway.
await shard.connect(); await shard.connect();
shard.send({ shard.send({
op: GatewayOpcodes.Resume, op: GatewayOpcodes.Resume,
d: { d: {
token: `Bot ${shard.gatewayConfig.token}`, token: `Bot ${shard.gatewayConfig.token}`,
session_id: shard.sessionId, session_id: shard.sessionId,
seq: shard.previousSequenceNumber ?? 0, seq: shard.previousSequenceNumber ?? 0,
}, },
}, true); }, true);
return new Promise((resolve) => { return new Promise((resolve) => {
shard.resolves.set("RESUMED", () => resolve()); shard.resolves.set("RESUMED", () => resolve());
// If it is attempted to resume with an invalid session id, // If it is attempted to resume with an invalid session id,
// Discord sends an invalid session payload // Discord sends an invalid session payload
// Not erroring here since this can easily happen, and it would not be catchable anyway // Not erroring here since this can easily happen, and it would not be catchable anyway
shard.resolves.set("INVALID_SESSION", () => { shard.resolves.set("INVALID_SESSION", () => {
shard.resolves.delete("RESUMED"); shard.resolves.delete("RESUMED");
resolve(); resolve();
});
}); });
});
} }


@ -1,27 +1,27 @@
import { Shard, ShardSocketRequest } from "./types.ts"; import { Shard, ShardSocketRequest } from "./types.ts";
async function checkOffline(shard: Shard, highPriority: boolean): Promise<void> { async function checkOffline(shard: Shard, highPriority: boolean): Promise<void> {
if (!shard.isOpen()) { if (!shard.isOpen()) {
await new Promise((resolve) => { await new Promise((resolve) => {
if (highPriority) { if (highPriority) {
// Higher priority requests get added at the beginning of the array. // Higher priority requests get added at the beginning of the array.
shard.offlineSendQueue.unshift(resolve); shard.offlineSendQueue.unshift(resolve);
} else { } else {
shard.offlineSendQueue.push(resolve); shard.offlineSendQueue.push(resolve);
} }
}); });
} }
} }
export async function send(shard: Shard, message: ShardSocketRequest, highPriority: boolean): Promise<void> { export async function send(shard: Shard, message: ShardSocketRequest, highPriority: boolean): Promise<void> {
// Before acquiring a token from the bucket, check whether the shard is currently offline or not. // Before acquiring a token from the bucket, check whether the shard is currently offline or not.
// Else bucket and token wait time just get wasted. // Else bucket and token wait time just get wasted.
await checkOffline(shard, highPriority); await checkOffline(shard, highPriority);
await shard.bucket.acquire(1, highPriority); await shard.bucket.acquire(1, highPriority);
// It's possible, that the shard went offline after a token has been acquired from the bucket. // It's possible, that the shard went offline after a token has been acquired from the bucket.
await checkOffline(shard, highPriority); await checkOffline(shard, highPriority);
shard.socket?.send(JSON.stringify(message)); shard.socket?.send(JSON.stringify(message));
} }
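As a usage sketch, a regular payload can go through the default priority, while time-critical payloads pass `true` as the second argument; the request-guild-members shape follows Discord's gateway documentation and the opcode name is assumed to be exported by the shared types:

// Regular priority request; pass `true` as the second argument for time-critical payloads.
await shard.send({
    op: GatewayOpcodes.RequestGuildMembers,
    d: { guild_id: "GUILD_ID_PLACEHOLDER", query: "", limit: 0 },
});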


@ -1,6 +1,6 @@
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts"; import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
export async function shutdown(shard: Shard): Promise<void> { export async function shutdown(shard: Shard): Promise<void> {
shard.close(ShardSocketCloseCodes.Shutdown, "Shard shutting down."); shard.close(ShardSocketCloseCodes.Shutdown, "Shard shutting down.");
shard.state = ShardState.Offline; shard.state = ShardState.Offline;
} }


@ -2,63 +2,63 @@ import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts"; import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
export function startHeartbeating(shard: Shard, interval: number) { export function startHeartbeating(shard: Shard, interval: number) {
// gateway.debug("GW HEARTBEATING_STARTED", { shardId, interval }); // gateway.debug("GW HEARTBEATING_STARTED", { shardId, interval });
shard.heart.interval = interval; shard.heart.interval = interval;
// Only set the shard's state to `Unidentified` // Only set the shard's state to `Unidentified`
// if heartbeating has not been started due to an identify or resume action. // if heartbeating has not been started due to an identify or resume action.
if ([ShardState.Disconnected, ShardState.Offline].includes(shard.state)) { if ([ShardState.Disconnected, ShardState.Offline].includes(shard.state)) {
shard.state = ShardState.Unidentified; shard.state = ShardState.Unidentified;
} }
// The first heartbeat needs to be sent with a random delay between `0` and `interval` // The first heartbeat needs to be sent with a random delay between `0` and `interval`
// Using a `setTimeout(_, jitter)` here to accomplish that. // Using a `setTimeout(_, jitter)` here to accomplish that.
// `Math.random()` can be `0` so we use `0.5` if this happens // `Math.random()` can be `0` so we use `0.5` if this happens
// Reference: https://discord.com/developers/docs/topics/gateway#heartbeating // Reference: https://discord.com/developers/docs/topics/gateway#heartbeating
const jitter = Math.ceil(shard.heart.interval * (Math.random() || 0.5)); const jitter = Math.ceil(shard.heart.interval * (Math.random() || 0.5));
shard.heart.timeoutId = setTimeout(() => { shard.heart.timeoutId = setTimeout(() => {
// Using a direct socket.send call here because heartbeat requests are reserved by us. // Using a direct socket.send call here because heartbeat requests are reserved by us.
shard.socket?.send(JSON.stringify({ shard.socket?.send(JSON.stringify({
op: GatewayOpcodes.Heartbeat, op: GatewayOpcodes.Heartbeat,
d: shard.previousSequenceNumber, d: shard.previousSequenceNumber,
})); }));
shard.heart.lastBeat = Date.now(); shard.heart.lastBeat = Date.now();
shard.heart.acknowledged = false; shard.heart.acknowledged = false;
// After the random heartbeat jitter we can start a normal interval. // After the random heartbeat jitter we can start a normal interval.
shard.heart.intervalId = setInterval(async () => { shard.heart.intervalId = setInterval(async () => {
// gateway.debug("GW DEBUG", `Running setInterval in heartbeat file. Shard: ${shardId}`); // gateway.debug("GW DEBUG", `Running setInterval in heartbeat file. Shard: ${shardId}`);
// gateway.debug("GW HEARTBEATING", { shardId, shard: currentShard }); // gateway.debug("GW HEARTBEATING", { shardId, shard: currentShard });
// The Shard did not receive a heartbeat ACK from Discord in time, // The Shard did not receive a heartbeat ACK from Discord in time,
// therefore we have to assume that the connection has failed or got "zombied". // therefore we have to assume that the connection has failed or got "zombied".
// The Shard needs to start a re-identify action accordingly. // The Shard needs to start a re-identify action accordingly.
// Reference: https://discord.com/developers/docs/topics/gateway#heartbeating-example-gateway-heartbeat-ack // Reference: https://discord.com/developers/docs/topics/gateway#heartbeating-example-gateway-heartbeat-ack
if (!shard.heart.acknowledged) { if (!shard.heart.acknowledged) {
shard.close( shard.close(
ShardSocketCloseCodes.ZombiedConnection, ShardSocketCloseCodes.ZombiedConnection,
"Zombied connection, did not receive an heartbeat ACK in time.", "Zombied connection, did not receive an heartbeat ACK in time.",
); );
return await shard.identify(); return await shard.identify();
} }
shard.heart.acknowledged = false; shard.heart.acknowledged = false;
// Using a direct socket.send call here because heartbeat requests are reserved by us. // Using a direct socket.send call here because heartbeat requests are reserved by us.
shard.socket?.send( shard.socket?.send(
JSON.stringify({ JSON.stringify({
op: GatewayOpcodes.Heartbeat, op: GatewayOpcodes.Heartbeat,
d: shard.previousSequenceNumber, d: shard.previousSequenceNumber,
}), }),
); );
shard.heart.lastBeat = Date.now(); shard.heart.lastBeat = Date.now();
shard.events.heartbeat?.(shard); shard.events.heartbeat?.(shard);
}, shard.heart.interval); }, shard.heart.interval);
}, jitter); }, jitter);
} }
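For context, a minimal standalone sketch of the jitter rule the block above implements (not part of this diff): the first heartbeat is delayed by a random fraction of the heartbeat interval, with `0.5` substituted when `Math.random()` happens to return exactly `0`. The helper name and the 41250ms interval are illustrative assumptions.

// Hypothetical helper illustrating the jitter rule; `interval` is the heartbeat interval in milliseconds.
function firstHeartbeatDelay(interval: number): number {
    // Math.random() is in [0, 1); fall back to 0.5 if it is exactly 0 so the delay is never 0ms.
    return Math.ceil(interval * (Math.random() || 0.5));
}

// With a typical 41250ms interval the first beat lands somewhere in (0, 41250].
console.log(firstHeartbeatDelay(41250));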


@ -1,9 +1,9 @@
import { Shard } from "./types.ts";
export function stopHeartbeating(shard: Shard): void {
    // Clear the regular heartbeat interval.
    clearInterval(shard.heart.intervalId);
    // It's possible that the Shard got closed before the first jittered heartbeat.
    // To be safe we should clear the related timeout too.
    clearTimeout(shard.heart.timeoutId);
}


@@ -11,138 +11,138 @@ export const DEFAULT_HEARTBEAT_INTERVAL = 45000;
export type Shard = ReturnType<typeof createShard>;
export enum ShardState {
    /** Shard is fully connected to the gateway and receiving events from Discord. */
    Connected = 0,
    /** Shard started to connect to the gateway.
     * This is only used if the shard is not currently trying to identify or resume.
     */
    Connecting = 1,
    /** Shard got disconnected and reconnection actions have been started. */
    Disconnected = 2,
    /** The shard is connected to the gateway but only heartbeating.
     * At this state the shard has not been identified with discord.
     */
    Unidentified = 3,
    /** Shard is trying to identify with the gateway to create a new session. */
    Identifying = 4,
    /** Shard is trying to resume a session with the gateway. */
    Resuming = 5,
    /** Shard got shut down deliberately or due to an error it cannot fix on its own, and may not attempt to reconnect on its own. */
    Offline = 6,
}
export interface ShardGatewayConfig {
    /** Whether incoming payloads are compressed using zlib.
     *
     * @default false
     */
    compress: boolean;
    /** The calculated intent value of the events which the shard should receive.
     *
     * @default 0
     */
    intents: number;
    /** Identify properties to use */
    properties: {
        /** Operating system the shard runs on.
         *
         * @default "darwin" | "linux" | "windows"
         */
        os: string;
        /** The "browser" where this shard is running on.
         *
         * @default "Discordeno"
         */
        browser: string;
        /** The device on which the shard is running.
         *
         * @default "Discordeno"
         */
        device: string;
    };
    /** Bot token which is used to connect to Discord */
    token: string;
    /** The URL of the gateway which should be connected to.
     *
     * @default "wss://gateway.discord.gg"
     */
    url: string;
    /** The gateway version which should be used.
     *
     * @default 10
     */
    version: number;
}
export interface ShardHeart {
    /** Whether or not the heartbeat was acknowledged by Discord in time. */
    acknowledged: boolean;
    /** Interval between heartbeats requested by Discord. */
    interval: number;
    /** Id of the interval, which is used for sending the heartbeats. */
    intervalId?: number;
    /** Unix (in milliseconds) timestamp when the last heartbeat ACK was received from Discord. */
    lastAck?: number;
    /** Unix timestamp (in milliseconds) when the last heartbeat was sent. */
    lastBeat?: number;
    /** Round trip time (in milliseconds) from Shard to Discord and back.
     * Calculated using the heartbeat system.
     * Note: this value is undefined until the first heartbeat to Discord has happened.
     */
    rtt?: number;
    /** Id of the timeout which is used for sending the first heartbeat to Discord since it's "special". */
    timeoutId?: number;
}
export interface ShardEvents {
    /** A heartbeat has been sent. */
    heartbeat?(shard: Shard): unknown;
    /** A heartbeat ACK was received. */
    heartbeatAck?(shard: Shard): unknown;
    /** Shard has received a Hello payload. */
    hello?(shard: Shard): unknown;
    /** The Shard's session has been invalidated. */
    invalidSession?(shard: Shard, resumable: boolean): unknown;
    /** The shard has started a resume action. */
    resuming?(shard: Shard): unknown;
    /** The shard has successfully resumed an old session. */
    resumed?(shard: Shard): unknown;
    /** Discord has requested the Shard to reconnect. */
    requestedReconnect?(shard: Shard): unknown;
    /** The shard started to connect to Discord's gateway. */
    connecting?(shard: Shard): unknown;
    /** The shard is connected with Discord's gateway. */
    connected?(shard: Shard): unknown;
    /** The shard has been disconnected from Discord's gateway. */
    disconnected?(shard: Shard): unknown;
    /** The shard has started to identify itself to Discord. */
    identifying?(shard: Shard): unknown;
    /** The shard has successfully identified itself with Discord. */
    identified?(shard: Shard): unknown;
    /** The shard has received a message from Discord. */
    message?(shard: Shard, payload: DiscordGatewayPayload): unknown;
}
export enum ShardSocketCloseCodes {
    /** A regular Shard shutdown. */
    Shutdown = 3000,
    /** A resume has been requested and therefore the old connection needs to be closed. */
    ResumeClosingOldConnection = 3024,
    /** Did not receive a heartbeat ACK in time.
     * Closing the shard and creating a new session.
     */
    ZombiedConnection = 3010,
    /** Discordeno's gateway tests have been finished, therefore the Shard can be turned off. */
    TestingFinished = 3064,
    /** Special close code reserved for Discordeno's zero-downtime resharding system. */
    Resharded = 3065,
    /** Shard is re-identifying therefore the old connection needs to be closed. */
    ReIdentifying = 3066,
}
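Illustrative sketch, not part of this file: these close codes are passed to shard.close() together with a reason string, as seen in the zombied-connection branch of the heartbeat code above. The `shard` variable is assumed to be an existing Shard instance.

// Assuming `shard` is a Shard created elsewhere; mirrors the heartbeat example above.
shard.close(ShardSocketCloseCodes.Shutdown, "Shard is being shut down deliberately.");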
export interface ShardSocketRequest {
    /** The OP-Code for the payload to send. */
    op: GatewayOpcodes;
    /** Payload data. */
    d: unknown;
}


@ -2,16 +2,16 @@ import { RestManager } from "./restManager.ts";
/** Check the rate limits for a url or a bucket. */
export function checkRateLimits(rest: RestManager, url: string) {
    const ratelimited = rest.rateLimitedPaths.get(url);
    const global = rest.rateLimitedPaths.get("global");
    const now = Date.now();
    if (ratelimited && now < ratelimited.resetTimestamp) {
        return ratelimited.resetTimestamp - now;
    }
    if (global && now < global.resetTimestamp) {
        return global.resetTimestamp - now;
    }
    return false;
}
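A small usage sketch (not part of the diff), assuming `rest` was created with createRestManager: checkRateLimits returns the remaining milliseconds when the path or bucket is still limited, and `false` otherwise.

// Hypothetical caller: wait out the remaining time before retrying a limited path.
const resetIn = rest.checkRateLimits(rest, "/channels/skillzPrefersID/messages");
if (resetIn) {
    setTimeout(() => rest.processGlobalQueue(rest), resetIn);
}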


@ -2,13 +2,13 @@ import { RestManager } from "./restManager.ts";
/** Cleans up the queues by checking if there is nothing left and removing it. */
export function cleanupQueues(rest: RestManager) {
    for (const [key, queue] of rest.pathQueues) {
        rest.debug(`[REST - cleanupQueues] Running for of loop. ${key}`);
        if (queue.requests.length) continue;
        // REMOVE IT FROM CACHE
        rest.pathQueues.delete(key);
    }
    // NO QUEUE LEFT, DISABLE THE QUEUE
    if (!rest.pathQueues.size) rest.processingQueue = false;
}


@ -1,6 +1,6 @@
import { RestRequestRejection } from "./rest.ts";
export function convertRestError(errorStack: Error, data: RestRequestRejection): Error {
    errorStack.message = `[${data.status}] ${data.error}\n${data.body}`;
    return errorStack;
}


@ -6,62 +6,62 @@ import { RequestMethod, RestPayload, RestRequest } from "./rest.ts";
/** Creates the request body and headers that are necessary to send a request. Will handle different types of methods and everything necessary for discord. */
// export function createRequestBody(rest: RestManager, queuedRequest: { request: RestRequest; payload: RestPayload }) {
export function createRequestBody(rest: RestManager, options: CreateRequestBodyOptions) {
    const headers: Record<string, string> = {
        "user-agent": USER_AGENT,
    };
    if (!options.unauthorized) headers["authorization"] = `Bot ${rest.token}`;
    // SOMETIMES SPECIAL HEADERS (E.G. CUSTOM AUTHORIZATION) NEED TO BE USED
    if (options.headers) {
        for (const key in options.headers) {
            headers[key.toLowerCase()] = options.headers[key];
        }
    }
    // GET METHODS SHOULD NOT HAVE A BODY
    if (options.method === "GET") {
        options.body = undefined;
    }
    // IF A REASON IS PROVIDED ENCODE IT IN HEADERS
    if (options.body?.reason) {
        headers["X-Audit-Log-Reason"] = encodeURIComponent(options.body.reason as string);
        options.body.reason = undefined;
    }
    // IF A FILE/ATTACHMENT IS PRESENT WE NEED SPECIAL HANDLING
    if (options.body?.file) {
        if (!Array.isArray(options.body.file)) {
            options.body.file = [options.body.file];
        }
        const form = new FormData();
        for (let i = 0; i < (options.body.file as FileContent[]).length; i++) {
            form.append(
                `file${i}`,
                (options.body.file as FileContent[])[i].blob,
                (options.body.file as FileContent[])[i].name,
            );
        }
        form.append("payload_json", JSON.stringify({ ...options.body, file: undefined }));
        options.body.file = form;
    } else if (options.body && !["GET", "DELETE"].includes(options.method)) {
        headers["Content-Type"] = "application/json";
    }
    return {
        headers,
        body: (options.body?.file ?? JSON.stringify(options.body)) as FormData | string,
        method: options.method,
    };
}
export interface CreateRequestBodyOptions { export interface CreateRequestBodyOptions {
headers?: Record<string, string>; headers?: Record<string, string>;
method: RequestMethod; method: RequestMethod;
body?: Record<string, unknown>; body?: Record<string, unknown>;
unauthorized?: boolean; unauthorized?: boolean;
} }


@ -2,80 +2,80 @@ import { RestManager } from "./restManager.ts";
import { HTTPResponseCodes } from "../types/shared.ts"; import { HTTPResponseCodes } from "../types/shared.ts";
export async function processGlobalQueue(rest: RestManager) {
    // IF QUEUE IS EMPTY EXIT
    if (!rest.globalQueue.length) return;
    // IF QUEUE IS ALREADY RUNNING EXIT
    if (rest.globalQueueProcessing) return;
    // SET AS TRUE SO OTHER QUEUES DON'T START
    rest.globalQueueProcessing = true;
    while (rest.globalQueue.length) {
        // IF THE BOT IS GLOBALLY RATE LIMITED TRY AGAIN
        if (rest.globallyRateLimited) {
            setTimeout(() => {
                rest.debug(`[REST - processGlobalQueue] Globally rate limited, running setTimeout.`);
                rest.processGlobalQueue(rest);
            }, 1000);
            // BREAK WHILE LOOP
            break;
        }
        if (rest.invalidRequests === rest.maxInvalidRequests - rest.invalidRequestsSafetyAmount) {
            setTimeout(() => {
                const time = rest.invalidRequestsInterval - (Date.now() - rest.invalidRequestFrozenAt);
                rest.debug(
                    `[REST - processGlobalQueue] Freeze global queue because of invalid requests. Time Remaining: ${
                        time / 1000
                    } seconds.`,
                );
                rest.processGlobalQueue(rest);
            }, 1000);
            // BREAK WHILE LOOP
            break;
        }
        const request = rest.globalQueue.shift();
        // REMOVES ANY POTENTIAL INVALID CONFLICTS
        if (!request) continue;
        // CHECK RATE LIMITS FOR 429 REPEATS
        // IF THIS URL IS STILL RATE LIMITED, TRY AGAIN
        const urlResetIn = rest.checkRateLimits(rest, request.basicURL);
        // IF A BUCKET EXISTS, CHECK THE BUCKET'S RATE LIMITS
        const bucketResetIn = request.payload.bucketId ? rest.checkRateLimits(rest, request.payload.bucketId) : false;
        if (urlResetIn || bucketResetIn) {
            // ONLY ADD TIMEOUT IF ANOTHER QUEUE IS NOT PENDING
            setTimeout(() => {
                rest.debug(`[REST - processGlobalQueue] rate limited, running setTimeout.`);
                // THIS REST IS RATE LIMITED, SO PUSH BACK TO START
                rest.globalQueue.unshift(request);
                // START QUEUE IF NOT STARTED
                rest.processGlobalQueue(rest);
            }, urlResetIn || (bucketResetIn as number));
            continue;
        }
        await rest.sendRequest(rest, {
            url: request.urlToUse,
            method: request.request.method,
            bucketId: request.payload.bucketId,
            reject: request.request.reject,
            respond: request.request.respond,
            retryCount: request.payload.retryCount ?? 0,
            payload: rest.createRequestBody(rest, {
                method: request.request.method,
                body: request.payload.body,
            }),
        })
            // Should be handled in sendRequest, this catch just prevents bots from dying
            .catch(() => null);
    }
    // ALLOW OTHER QUEUES TO START WHEN NEW REQUEST IS MADE
    rest.globalQueueProcessing = false;
}


@ -2,56 +2,56 @@ import { RestManager } from "./restManager.ts";
/** Processes the queue by looping over each path separately until the queues are empty. */
export function processQueue(rest: RestManager, id: string) {
    const queue = rest.pathQueues.get(id);
    if (!queue) return;
    while (queue.requests.length) {
        rest.debug(`[REST - processQueue] Running while loop.`);
        // SELECT THE FIRST ITEM FROM THIS QUEUE
        const queuedRequest = queue.requests[0];
        // IF THIS DOESN'T HAVE ANY ITEMS JUST CANCEL, THE CLEANER WILL REMOVE IT.
        if (!queuedRequest) break;
        const basicURL = rest.simplifyUrl(queuedRequest.request.url, queuedRequest.request.method);
        // IF THIS URL IS STILL RATE LIMITED, TRY AGAIN
        const urlResetIn = rest.checkRateLimits(rest, basicURL);
        if (urlResetIn) {
            // ONLY ADD TIMEOUT IF ANOTHER QUEUE IS NOT PENDING
            if (!queue.isWaiting) {
                queue.isWaiting = true;
                setTimeout(() => {
                    queue.isWaiting = false;
                    rest.debug(`[REST - processQueue] rate limited, running setTimeout.`);
                    rest.processQueue(rest, id);
                }, urlResetIn);
            }
            // BREAK WHILE LOOP
            break;
        }
        // IF A BUCKET EXISTS, CHECK THE BUCKET'S RATE LIMITS
        const bucketResetIn = queuedRequest.payload.bucketId
            ? rest.checkRateLimits(rest, queuedRequest.payload.bucketId)
            : false;
        // THIS BUCKET IS STILL RATE LIMITED, RE-ADD TO QUEUE
        if (bucketResetIn) continue;
        // EXECUTE THE REQUEST
        // CUSTOM HANDLER FOR USER TO LOG OR WHATEVER WHENEVER A FETCH IS MADE
        rest.debug(`[REST - Add To Global Queue] ${JSON.stringify(queuedRequest.payload)}`);
        rest.globalQueue.push({
            ...queuedRequest,
            urlToUse: queuedRequest.request.url,
            basicURL,
        });
        rest.processGlobalQueue(rest);
        queue.requests.shift();
    }
    // ONCE QUEUE IS DONE, WE CAN TRY CLEANING UP
    rest.cleanupQueues(rest);
}


@@ -2,28 +2,28 @@ import { RestManager } from "./restManager.ts";
/** This will create an infinite loop, re-run every 1 second via tail recursion, to keep rate limits clean. When a rate limit resets, this will remove it so the queue can proceed. */
export function processRateLimitedPaths(rest: RestManager) {
    const now = Date.now();
    for (const [key, value] of rest.rateLimitedPaths.entries()) {
        rest.debug(`[REST - processRateLimitedPaths] Running for of loop. ${value.resetTimestamp - now}`);
        // IF THE TIME HAS NOT REACHED CANCEL
        if (value.resetTimestamp > now) continue;
        // RATE LIMIT IS OVER, DELETE THE RATE LIMITER
        rest.rateLimitedPaths.delete(key);
        // IF IT WAS GLOBAL ALSO MARK THE GLOBAL VALUE AS FALSE
        if (key === "global") rest.globallyRateLimited = false;
    }
    // ALL PATHS ARE CLEARED CAN CANCEL OUT!
    if (!rest.rateLimitedPaths.size) {
        rest.processingRateLimitedPaths = false;
    } else {
        rest.processingRateLimitedPaths = true;
        // RECHECK IN 1 SECOND
        setTimeout(() => {
            rest.debug(`[REST - processRateLimitedPaths] Running setTimeout.`);
            rest.processRateLimitedPaths(rest);
        }, 1000);
    }
}


@ -4,33 +4,33 @@ import { RestPayload, RestRequest } from "./rest.ts";
/** Processes a request and assigns it to a queue or creates a queue if none exists for it. */
export function processRequest(rest: RestManager, request: RestRequest, payload: RestPayload) {
    const route = request.url.substring(request.url.indexOf("api/"));
    const parts = route.split("/");
    // REMOVE THE API
    parts.shift();
    // REMOVES THE VERSION NUMBER
    if (parts[0]?.startsWith("v")) parts.shift();
    // SET THE NEW REQUEST URL
    request.url = `${BASE_URL}/v${rest.version}/${parts.join("/")}`;
    // REMOVE THE MAJOR PARAM
    parts.shift();
    const url = rest.simplifyUrl(request.url, request.method);
    const queue = rest.pathQueues.get(url);
    if (queue) {
        queue.requests.push({ request, payload });
    } else {
        // CREATES A NEW QUEUE
        rest.pathQueues.set(url, {
            isWaiting: false,
            requests: [
                {
                    request,
                    payload,
                },
            ],
        });
        rest.processQueue(rest, url);
    }
}
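A hedged usage sketch (not part of the diff), assuming `rest` is a manager from createRestManager: a request handed to processRequest is grouped into a per-path queue, promoted to the global queue by processQueue, and finally executed by sendRequest once no rate limit applies.

rest.processRequest(
    rest,
    {
        url: "https://discord.com/api/v10/users/@me",
        method: "GET",
        respond: (data) => console.log("status:", data.status),
        reject: (err) => console.error("rejected:", err.error),
    },
    { retryCount: 0 },
);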


@ -2,62 +2,62 @@ import { RestManager } from "./restManager.ts";
/** Processes the rate limit headers and determines if it needs to be rate limited and returns the bucket id if available */
export function processRequestHeaders(rest: RestManager, url: string, headers: Headers) {
    let rateLimited = false;
    // GET ALL NECESSARY HEADERS
    const remaining = headers.get("x-ratelimit-remaining");
    const retryAfter = headers.get("x-ratelimit-reset-after");
    const reset = Date.now() + Number(retryAfter) * 1000;
    const global = headers.get("x-ratelimit-global");
    // undefined override null needed for typings
    const bucketId = headers.get("x-ratelimit-bucket") || undefined;
    // IF THERE IS NO REMAINING RATE LIMIT, MARK IT AS RATE LIMITED
    if (remaining === "0") {
        rateLimited = true;
        // SAVE THE URL AS LIMITED, IMPORTANT FOR NEW REQUESTS BY USER WITHOUT BUCKET
        rest.rateLimitedPaths.set(url, {
            url,
            resetTimestamp: reset,
            bucketId,
        });
        // SAVE THE BUCKET AS LIMITED SINCE DIFFERENT URLS MAY SHARE A BUCKET
        if (bucketId) {
            rest.rateLimitedPaths.set(bucketId, {
                url,
                resetTimestamp: reset,
                bucketId,
            });
        }
    }
    // IF THERE IS NO REMAINING GLOBAL LIMIT, MARK IT RATE LIMITED GLOBALLY
    if (global) {
        const retryAfter = headers.get("retry-after");
        const globalReset = Date.now() + Number(retryAfter) * 1000;
        rest.debug(`[REST = Globally Rate Limited] URL: ${url} | Global Rest: ${globalReset}`);
        rest.globallyRateLimited = true;
        rateLimited = true;
        rest.rateLimitedPaths.set("global", {
            url: "global",
            resetTimestamp: globalReset,
            bucketId,
        });
        if (bucketId) {
            rest.rateLimitedPaths.set(bucketId, {
                url: "global",
                resetTimestamp: globalReset,
                bucketId,
            });
        }
    }
    if (rateLimited && !rest.processingRateLimitedPaths) {
        rest.processRateLimitedPaths(rest);
    }
    return rateLimited ? bucketId : undefined;
}
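Illustrative sketch (not part of the diff), assuming `rest` from createRestManager: when the relevant headers report no remaining requests, the simplified URL and its bucket are marked as limited until the reset time and the bucket id is returned. The header values and bucket id below are made up.

const headers = new Headers({
    "x-ratelimit-remaining": "0",
    "x-ratelimit-reset-after": "2.5", // seconds until reset
    "x-ratelimit-bucket": "abcd1234", // made-up bucket id
});
// Marks "/channels/skillzPrefersID/messages" (and bucket "abcd1234") as limited for ~2500ms.
const bucketId = rest.processRequestHeaders(rest, "/channels/skillzPrefersID/messages", headers);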

vendor/rest/rest.ts vendored

@ -1,31 +1,31 @@
export interface RestRequest { export interface RestRequest {
url: string; url: string;
method: RequestMethod; method: RequestMethod;
respond: (payload: RestRequestResponse) => unknown; respond: (payload: RestRequestResponse) => unknown;
reject: (payload: RestRequestRejection) => unknown; reject: (payload: RestRequestRejection) => unknown;
} }
export interface RestRequestResponse { export interface RestRequestResponse {
ok: boolean; ok: boolean;
status: number; status: number;
body?: string; body?: string;
} }
export interface RestRequestRejection extends RestRequestResponse { export interface RestRequestRejection extends RestRequestResponse {
error: string; error: string;
} }
export interface RestPayload { export interface RestPayload {
bucketId?: string; bucketId?: string;
body?: Record<string, unknown>; body?: Record<string, unknown>;
retryCount: number; retryCount: number;
headers?: Record<string, string>; headers?: Record<string, string>;
} }
export interface RestRateLimitedPath { export interface RestRateLimitedPath {
url: string; url: string;
resetTimestamp: number; resetTimestamp: number;
bucketId?: string; bucketId?: string;
} }
export type RequestMethod = "GET" | "POST" | "PUT" | "DELETE" | "PATCH"; export type RequestMethod = "GET" | "POST" | "PUT" | "DELETE" | "PATCH";


@ -16,87 +16,87 @@ import { removeTokenPrefix } from "../util/token.ts";
import { sendRequest } from "./sendRequest.ts"; import { sendRequest } from "./sendRequest.ts";
export function createRestManager(options: CreateRestManagerOptions) { export function createRestManager(options: CreateRestManagerOptions) {
const version = options.version || API_VERSION; const version = options.version || API_VERSION;
if (options.customUrl) { if (options.customUrl) {
baseEndpoints.BASE_URL = `${options.customUrl}/v${version}`; baseEndpoints.BASE_URL = `${options.customUrl}/v${version}`;
} }
return { return {
// current invalid amount // current invalid amount
invalidRequests: 0, invalidRequests: 0,
// max invalid requests allowed until ban // max invalid requests allowed until ban
maxInvalidRequests: 10000, maxInvalidRequests: 10000,
// 10 minutes // 10 minutes
invalidRequestsInterval: 600000, invalidRequestsInterval: 600000,
// timer to reset to 0 // timer to reset to 0
invalidRequestsTimeoutId: 0, invalidRequestsTimeoutId: 0,
// how safe to be from max // how safe to be from max
invalidRequestsSafetyAmount: 1, invalidRequestsSafetyAmount: 1,
// when first request in this period was made // when first request in this period was made
invalidRequestFrozenAt: 0, invalidRequestFrozenAt: 0,
invalidRequestErrorStatuses: [401, 403, 429], invalidRequestErrorStatuses: [401, 403, 429],
version, version,
token: removeTokenPrefix(options.token), token: removeTokenPrefix(options.token),
maxRetryCount: options.maxRetryCount || 10, maxRetryCount: options.maxRetryCount || 10,
secretKey: options.secretKey || "discordeno_best_lib_ever", secretKey: options.secretKey || "discordeno_best_lib_ever",
customUrl: options.customUrl || "", customUrl: options.customUrl || "",
pathQueues: new Map< pathQueues: new Map<
string, string,
{ {
isWaiting: boolean; isWaiting: boolean;
requests: { requests: {
request: RestRequest; request: RestRequest;
payload: RestPayload; payload: RestPayload;
}[]; }[];
} }
>(), >(),
processingQueue: false, processingQueue: false,
processingRateLimitedPaths: false, processingRateLimitedPaths: false,
globallyRateLimited: false, globallyRateLimited: false,
globalQueue: [] as { globalQueue: [] as {
request: RestRequest; request: RestRequest;
payload: RestPayload; payload: RestPayload;
basicURL: string; basicURL: string;
urlToUse: string; urlToUse: string;
}[], }[],
globalQueueProcessing: false, globalQueueProcessing: false,
rateLimitedPaths: new Map<string, RestRateLimitedPath>(), rateLimitedPaths: new Map<string, RestRateLimitedPath>(),
debug: options.debug || function (_text: string) {}, debug: options.debug || function (_text: string) {},
checkRateLimits: options.checkRateLimits || checkRateLimits, checkRateLimits: options.checkRateLimits || checkRateLimits,
cleanupQueues: options.cleanupQueues || cleanupQueues, cleanupQueues: options.cleanupQueues || cleanupQueues,
processQueue: options.processQueue || processQueue, processQueue: options.processQueue || processQueue,
processRateLimitedPaths: options.processRateLimitedPaths || processRateLimitedPaths, processRateLimitedPaths: options.processRateLimitedPaths || processRateLimitedPaths,
processRequestHeaders: options.processRequestHeaders || processRequestHeaders, processRequestHeaders: options.processRequestHeaders || processRequestHeaders,
processRequest: options.processRequest || processRequest, processRequest: options.processRequest || processRequest,
createRequestBody: options.createRequestBody || createRequestBody, createRequestBody: options.createRequestBody || createRequestBody,
runMethod: options.runMethod || runMethod, runMethod: options.runMethod || runMethod,
simplifyUrl: options.simplifyUrl || simplifyUrl, simplifyUrl: options.simplifyUrl || simplifyUrl,
processGlobalQueue: options.processGlobalQueue || processGlobalQueue, processGlobalQueue: options.processGlobalQueue || processGlobalQueue,
convertRestError: options.convertRestError || convertRestError, convertRestError: options.convertRestError || convertRestError,
sendRequest: options.sendRequest || sendRequest, sendRequest: options.sendRequest || sendRequest,
}; };
} }
export interface CreateRestManagerOptions { export interface CreateRestManagerOptions {
token: string; token: string;
customUrl?: string; customUrl?: string;
maxRetryCount?: number; maxRetryCount?: number;
version?: number; version?: number;
secretKey?: string; secretKey?: string;
debug?: (text: string) => unknown; debug?: (text: string) => unknown;
checkRateLimits?: typeof checkRateLimits; checkRateLimits?: typeof checkRateLimits;
cleanupQueues?: typeof cleanupQueues; cleanupQueues?: typeof cleanupQueues;
processQueue?: typeof processQueue; processQueue?: typeof processQueue;
processRateLimitedPaths?: typeof processRateLimitedPaths; processRateLimitedPaths?: typeof processRateLimitedPaths;
processRequestHeaders?: typeof processRequestHeaders; processRequestHeaders?: typeof processRequestHeaders;
processRequest?: typeof processRequest; processRequest?: typeof processRequest;
createRequestBody?: typeof createRequestBody; createRequestBody?: typeof createRequestBody;
runMethod?: typeof runMethod; runMethod?: typeof runMethod;
simplifyUrl?: typeof simplifyUrl; simplifyUrl?: typeof simplifyUrl;
processGlobalQueue?: typeof processGlobalQueue; processGlobalQueue?: typeof processGlobalQueue;
convertRestError?: typeof convertRestError; convertRestError?: typeof convertRestError;
sendRequest?: typeof sendRequest; sendRequest?: typeof sendRequest;
} }
export type RestManager = ReturnType<typeof createRestManager>; export type RestManager = ReturnType<typeof createRestManager>;
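Minimal end-to-end sketch (not part of the diff) of wiring the manager above to runMethod; the token value is a placeholder and the import path assumes the caller sits next to restManager.ts.

import { createRestManager } from "./restManager.ts";

const rest = createRestManager({
    token: "YOUR_BOT_TOKEN", // placeholder, never commit a real token
    debug: (text) => console.log(text),
});

// "/users/@me" flows through processRequest -> processQueue -> globalQueue -> sendRequest.
const me = await rest.runMethod(rest, "GET", "/users/@me");
console.log(me.id);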


@ -3,76 +3,76 @@ import { API_VERSION, BASE_URL, baseEndpoints } from "../util/constants.ts";
import { RequestMethod, RestRequestRejection, RestRequestResponse } from "./rest.ts"; import { RequestMethod, RestRequestRejection, RestRequestResponse } from "./rest.ts";
export async function runMethod<T = any>(
    rest: RestManager,
    method: RequestMethod,
    route: string,
    body?: unknown,
    options?: {
        retryCount?: number;
        bucketId?: string;
        headers?: Record<string, string>;
    },
): Promise<T> {
    rest.debug(
        `[REST - RequestCreate] Method: ${method} | URL: ${route} | Retry Count: ${
            options?.retryCount ?? 0
        } | Bucket ID: ${options?.bucketId} | Body: ${
            JSON.stringify(
                body,
            )
        }`,
    );
    const errorStack = new Error("Location:");
    // @ts-ignore Breaks deno deploy. Luca said add ts-ignore until it's fixed
    Error.captureStackTrace(errorStack);
    // For proxies we don't need to do any of the legwork so we just forward the request
    if (!baseEndpoints.BASE_URL.startsWith(BASE_URL) && route[0] === "/") {
        const result = await fetch(`${baseEndpoints.BASE_URL}${route}`, {
            body: body ? JSON.stringify(body) : undefined,
            headers: {
                Authorization: rest.secretKey,
                "Content-Type": "application/json",
            },
            method,
        }).catch((error) => {
            errorStack.message = (error as Error)?.message;
            console.error(error);
            throw errorStack;
        });
        if (!result.ok) {
            errorStack.message = result.statusText;
            rest.debug(`[ERROR] ${errorStack.message}`);
            // Closes the response to prevent memory leak
            await result.text();
            throw errorStack;
        }
        return result.status !== 204 ? await result.json() : undefined;
    }
    // No proxy so we need to handle all rate limiting and such
    return new Promise((resolve, reject) => {
        rest.processRequest(
            rest,
            {
                url: route[0] === "/" ? `${BASE_URL}/v${API_VERSION}${route}` : route,
                method,
                reject: (data: RestRequestRejection) => {
                    const restError = rest.convertRestError(errorStack, data);
                    reject(restError);
                },
                respond: (data: RestRequestResponse) =>
                    resolve(data.status !== 204 ? JSON.parse(data.body ?? "{}") : (undefined as unknown as T)),
            },
            {
                bucketId: options?.bucketId,
                body: body as Record<string, unknown> | undefined,
                retryCount: options?.retryCount ?? 0,
                headers: options?.headers,
            },
        );
    });
}


@ -6,42 +6,42 @@ export type ProxyMethodResponse<T> = Omit<RestRequestResponse | RestRequestRejec
// Left out proxy request, because it's not needed here // Left out proxy request, because it's not needed here
// this file could also be moved to a plugin. // this file could also be moved to a plugin.
export async function runProxyMethod<T = any>(
    rest: RestManager,
    method: "GET" | "POST" | "PUT" | "DELETE" | "PATCH",
    url: string,
    body?: unknown,
    retryCount = 0,
    bucketId?: string,
): Promise<ProxyMethodResponse<T>> {
    rest.debug(
        `[REST - RequestCreate] Method: ${method} | URL: ${url} | Retry Count: ${retryCount} | Bucket ID: ${bucketId} | Body: ${
            JSON.stringify(
                body,
            )
        }`,
    );
    // No proxy so we need to handle all rate limiting and such
    return new Promise((resolve, reject) => {
        rest.processRequest(
            rest,
            {
                url,
                method,
                reject: (data: RestRequestRejection) => {
                    const { body: b, ...r } = data;
                    reject({ body: data.status !== 204 ? JSON.parse(b ?? "{}") : (undefined as unknown as T), ...r });
                },
                respond: (data: RestRequestResponse) => {
                    const { body: b, ...r } = data;
                    resolve({ body: data.status !== 204 ? JSON.parse(b ?? "{}") : (undefined as unknown as T), ...r });
                },
            },
            {
                bucketId,
                body: body as Record<string, unknown> | undefined,
                retryCount,
            },
        );
    });
}


@ -4,152 +4,152 @@ import { RequestMethod } from "./rest.ts";
import { RestManager } from "./restManager.ts"; import { RestManager } from "./restManager.ts";
export interface RestSendRequestOptions { export interface RestSendRequestOptions {
url: string; url: string;
method: RequestMethod; method: RequestMethod;
bucketId?: string; bucketId?: string;
reject?: Function; reject?: Function;
respond?: Function; respond?: Function;
retryCount?: number; retryCount?: number;
payload?: { payload?: {
headers: Record<string, string>; headers: Record<string, string>;
body: string | FormData; body: string | FormData;
}; };
} }
export async function sendRequest<T>(rest: RestManager, options: RestSendRequestOptions): Promise<T> {
    try {
        // CUSTOM HANDLER FOR USER TO LOG OR WHATEVER WHENEVER A FETCH IS MADE
        rest.debug(`[REST - fetching] URL: ${options.url} | ${JSON.stringify(options)}`);
        const response = await fetch(
            options.url.startsWith(BASE_URL) ? options.url : `${BASE_URL}/v${rest.version}/${options.url}`,
            {
                method: options.method,
                headers: options.payload?.headers,
                body: options.payload?.body,
            },
        );
        rest.debug(`[REST - fetched] URL: ${options.url} | ${JSON.stringify(options)}`);
        const bucketIdFromHeaders = rest.processRequestHeaders(
            rest,
            rest.simplifyUrl(options.url, options.method),
            response.headers,
        );
        // SET THE BUCKET Id IF IT WAS PRESENT
        if (bucketIdFromHeaders) {
            options.bucketId = bucketIdFromHeaders;
        }
        if (response.status < 200 || response.status >= 400) {
            rest.debug(
                `[REST - httpError] Payload: ${JSON.stringify(options)} | Response: ${JSON.stringify(response)}`,
            );
            let error = "REQUEST_UNKNOWN_ERROR";
            switch (response.status) {
                case HTTPResponseCodes.BadRequest:
                    error = "The options was improperly formatted, or the server couldn't understand it.";
                    break;
                case HTTPResponseCodes.Unauthorized:
                    error = "The Authorization header was missing or invalid.";
                    break;
                case HTTPResponseCodes.Forbidden:
                    error = "The Authorization token you passed did not have permission to the resource.";
                    break;
                case HTTPResponseCodes.NotFound:
                    error = "The resource at the location specified doesn't exist.";
                    break;
                case HTTPResponseCodes.MethodNotAllowed:
                    error = "The HTTP method used is not valid for the location specified.";
                    break;
                case HTTPResponseCodes.GatewayUnavailable:
                    error = "There was not a gateway available to process your options. Wait a bit and retry.";
                    break;
            }
            if (
                rest.invalidRequestErrorStatuses.includes(response.status) &&
                !(response.status === 429 && response.headers.get("X-RateLimit-Scope"))
            ) {
                // INCREMENT CURRENT INVALID REQUESTS
                ++rest.invalidRequests;
                if (!rest.invalidRequestsTimeoutId) {
                    rest.invalidRequestsTimeoutId = setTimeout(() => {
                        rest.debug(`[REST - processGlobalQueue] Resetting invalid optionss counter in setTimeout.`);
                        rest.invalidRequests = 0;
                        rest.invalidRequestsTimeoutId = 0;
                    }, rest.invalidRequestsInterval);
                }
            }
            // If NOT rate limited remove from queue
            if (response.status !== 429) {
                options.reject?.({
                    ok: false,
                    status: response.status,
                    error,
                    body: response.type ? JSON.stringify(await response.json()) : undefined,
                });
                throw new Error(
                    JSON.stringify({
                        ok: false,
                        status: response.status,
                        error,
                        body: response.type ? JSON.stringify(await response.json()) : undefined,
                    }),
                );
            } else {
                if (options.retryCount && options.retryCount++ >= rest.maxRetryCount) {
                    rest.debug(`[REST - RetriesMaxed] ${JSON.stringify(options)}`);
                    // REMOVE ITEM FROM QUEUE TO PREVENT RETRY
                    options.reject?.({
                        ok: false,
                        status: response.status,
                        error: "The options was rate limited and it maxed out the retries limit.",
                    });
                    // @ts-ignore Code should never reach here
                    return;
                }
            }
        }
        // SOMETIMES DISCORD RETURNS AN EMPTY 204 RESPONSE THAT CAN'T BE MADE TO JSON
        if (response.status === 204) {
            rest.debug(`[REST - FetchSuccess] URL: ${options.url} | ${JSON.stringify(options)}`);
            options.respond?.({
                ok: true,
                status: 204,
            });
            // @ts-ignore 204 will be void
            return;
        } else {
            // CONVERT THE RESPONSE TO JSON
            const json = JSON.stringify(await response.json());
            rest.debug(`[REST - fetchSuccess] ${JSON.stringify(options)}`);
            options.respond?.({
                ok: true,
                status: 200,
                body: json,
            });
            return JSON.parse(json);
        }
    } catch (error) {
        // SOMETHING WENT WRONG, LOG AND RESPOND WITH ERROR
        rest.debug(`[REST - fetchFailed] Payload: ${JSON.stringify(options)} | Error: ${error}`);
        options.reject?.({
            ok: false,
            status: 599,
            error: "Internal Proxy Error",
        });
        throw new Error("Something went wrong in sendRequest", {
            cause: error,
        });
    }
}


@ -5,21 +5,21 @@
/** Split a url to separate rate limit buckets based on major/minor parameters. */
export function simplifyUrl(url: string, method: string) {
    let route = url
        .replace(/\/([a-z-]+)\/(?:[0-9]{17,19})/g, function (match, p) {
            return ["channels", "guilds"].includes(p) ? match : `/${p}/skillzPrefersID`;
        })
        .replace(/\/reactions\/[^/]+/g, "/reactions/skillzPrefersID");
    // GENERAL /reactions and /reactions/emoji/@me share the buckets
    if (route.includes("/reactions")) {
        route = route.substring(0, route.indexOf("/reactions") + "/reactions".length);
    }
    // Delete Message endpoint has its own rate limit
    if (method === "DELETE" && route.endsWith("/messages/skillzPrefersID")) {
        route = method + route;
    }
    return route;
}
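Illustrative results (not part of the diff) of the bucket simplification above, using made-up 18-digit snowflakes:

simplifyUrl("/channels/111111111111111111/messages/222222222222222222", "GET");
// -> "/channels/111111111111111111/messages/skillzPrefersID"
simplifyUrl("/channels/111111111111111111/messages/222222222222222222", "DELETE");
// -> "DELETE/channels/111111111111111111/messages/skillzPrefersID"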

vendor/types/discord.ts vendored

File diff suppressed because it is too large

vendor/types/shared.ts vendored

File diff suppressed because it is too large

vendor/util/bucket.ts vendored

@ -8,168 +8,168 @@ import { delay } from "./delay.ts";
* NOTE: This bucket is lazy, meaning it only updates when a related method is called.
*/ */
export interface LeakyBucket { export interface LeakyBucket {
// ---------- // ----------
// PROPERTIES // PROPERTIES
// ---------- // ----------
/** How many tokens this bucket can hold. */ /** How many tokens this bucket can hold. */
max: number; max: number;
/** Amount of tokens gained per interval. /** Amount of tokens gained per interval.
* If bigger than `max` it will be capped at `max`.
*/ */
refillAmount: number; refillAmount: number;
/** Interval at which the bucket gains tokens. */ /** Interval at which the bucket gains tokens. */
refillInterval: number; refillInterval: number;
// ---------- // ----------
// METHODS // METHODS
// ---------- // ----------
/** Acquire tokens from the bucket. /** Acquire tokens from the bucket.
* Resolves when the tokens are acquired and available. * Resolves when the tokens are acquired and available.
* @param {boolean} [highPriority=false] Whether this acquire should be done ASAP.
*/ */
acquire(amount: number, highPriority?: boolean): Promise<void>; acquire(amount: number, highPriority?: boolean): Promise<void>;
/** Returns the number of milliseconds until the next refill. */ /** Returns the number of milliseconds until the next refill. */
nextRefill(): number; nextRefill(): number;
/** Current tokens in the bucket. */ /** Current tokens in the bucket. */
tokens(): number; tokens(): number;
// ---------- // ----------
// INTERNAL STATES // INTERNAL STATES
// ---------- // ----------
/** @private Internal track of when the last refill of tokens was. /** @private Internal track of when the last refill of tokens was.
* DO NOT TOUCH THIS! Unless you know what you are doing ofc :P * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
*/ */
lastRefill: number; lastRefill: number;
/** @private Internal state of whether currently it is allowed to acquire tokens. /** @private Internal state of whether currently it is allowed to acquire tokens.
* DO NOT TOUCH THIS! Unless you know what you are doing ofc :P * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
*/ */
allowAcquire: boolean; allowAcquire: boolean;
/** @private Internal number of currently available tokens. /** @private Internal number of currently available tokens.
* DO NOT TOUCH THIS! Unless you know what you are doing ofc :P * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
*/ */
tokensState: number; tokensState: number;
/** @private Internal array of promises necessary to guarantee no race conditions. /** @private Internal array of promises necessary to guarantee no race conditions.
* DO NOT TOUCH THIS! Unless you know what you are doing ofc :P * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
*/ */
waiting: ((_?: unknown) => void)[]; waiting: ((_?: unknown) => void)[];
} }
export function createLeakyBucket( export function createLeakyBucket(
{ max, refillInterval, refillAmount, tokens, waiting, ...rest }: { max, refillInterval, refillAmount, tokens, waiting, ...rest }:
& Omit< & Omit<
PickPartial< PickPartial<
LeakyBucket, LeakyBucket,
"max" | "refillInterval" | "refillAmount" "max" | "refillInterval" | "refillAmount"
>, >,
"tokens" "tokens"
> >
& { & {
/** Current tokens in the bucket. /** Current tokens in the bucket.
* @default max * @default max
*/ */
tokens?: number; tokens?: number;
}, },
): LeakyBucket { ): LeakyBucket {
return { return {
max, max,
refillInterval, refillInterval,
refillAmount: refillAmount > max ? max : refillAmount, refillAmount: refillAmount > max ? max : refillAmount,
lastRefill: performance.now(), lastRefill: performance.now(),
allowAcquire: true, allowAcquire: true,
nextRefill: function () { nextRefill: function () {
return nextRefill(this); return nextRefill(this);
}, },
tokens: function () { tokens: function () {
return updateTokens(this); return updateTokens(this);
}, },
acquire: async function (amount, highPriority) { acquire: async function (amount, highPriority) {
return await acquire(this, amount, highPriority); return await acquire(this, amount, highPriority);
}, },
tokensState: tokens ?? max, tokensState: tokens ?? max,
waiting: waiting ?? [], waiting: waiting ?? [],
...rest, ...rest,
}; };
} }
/** Update the tokens of that bucket. /** Update the tokens of that bucket.
* @returns {number} The amount of current available tokens. * @returns {number} The amount of current available tokens.
*/ */
function updateTokens(bucket: LeakyBucket): number { function updateTokens(bucket: LeakyBucket): number {
const timePassed = performance.now() - bucket.lastRefill; const timePassed = performance.now() - bucket.lastRefill;
const missedRefills = Math.floor(timePassed / bucket.refillInterval); const missedRefills = Math.floor(timePassed / bucket.refillInterval);
// The refill shall not exceed the max amount of tokens. // The refill shall not exceed the max amount of tokens.
bucket.tokensState = Math.min(bucket.tokensState + (bucket.refillAmount * missedRefills), bucket.max); bucket.tokensState = Math.min(bucket.tokensState + (bucket.refillAmount * missedRefills), bucket.max);
bucket.lastRefill += bucket.refillInterval * missedRefills; bucket.lastRefill += bucket.refillInterval * missedRefills;
return bucket.tokensState; return bucket.tokensState;
} }
function nextRefill(bucket: LeakyBucket): number { function nextRefill(bucket: LeakyBucket): number {
// Since this bucket is lazy update the tokens before calculating the next refill. // Since this bucket is lazy update the tokens before calculating the next refill.
updateTokens(bucket); updateTokens(bucket);
return (performance.now() - bucket.lastRefill) + bucket.refillInterval; return (performance.now() - bucket.lastRefill) + bucket.refillInterval;
} }
async function acquire(bucket: LeakyBucket, amount: number, highPriority = false): Promise<void> { async function acquire(bucket: LeakyBucket, amount: number, highPriority = false): Promise<void> {
// To prevent the race condition of 2 acquires happening at once, // To prevent the race condition of 2 acquires happening at once,
// check whether its currently allowed to acquire. // check whether its currently allowed to acquire.
if (!bucket.allowAcquire) {
// create, push, and wait until the current running acquiring is finished.
await new Promise((resolve) => {
if (highPriority) {
bucket.waiting.unshift(resolve);
} else {
bucket.waiting.push(resolve);
}
});
// Somehow another acquire has started,
// so need to wait again.
if (!bucket.allowAcquire) { if (!bucket.allowAcquire) {
return await acquire(bucket, amount); // create, push, and wait until the current running acquiring is finished.
await new Promise((resolve) => {
if (highPriority) {
bucket.waiting.unshift(resolve);
} else {
bucket.waiting.push(resolve);
}
});
// Somehow another acquire has started,
// so need to wait again.
if (!bucket.allowAcquire) {
return await acquire(bucket, amount);
}
} }
}
bucket.allowAcquire = false; bucket.allowAcquire = false;
// Since the bucket is lazy update the tokens now, // Since the bucket is lazy update the tokens now,
// and also get the current amount of available tokens // and also get the current amount of available tokens
let currentTokens = updateTokens(bucket); let currentTokens = updateTokens(bucket);
// It's possible that more than available tokens have been acquired, // It's possible that more than available tokens have been acquired,
// so calculate the amount of milliseconds to wait until this acquire is good to go. // so calculate the amount of milliseconds to wait until this acquire is good to go.
if (currentTokens < amount) { if (currentTokens < amount) {
const tokensNeeded = amount - currentTokens; const tokensNeeded = amount - currentTokens;
let refillsNeeded = Math.ceil(tokensNeeded / bucket.refillAmount); let refillsNeeded = Math.ceil(tokensNeeded / bucket.refillAmount);
const waitTime = bucket.refillInterval * refillsNeeded; const waitTime = bucket.refillInterval * refillsNeeded;
await delay(waitTime); await delay(waitTime);
// Update the tokens again to ensure nothing has been missed. // Update the tokens again to ensure nothing has been missed.
updateTokens(bucket); updateTokens(bucket);
} }
// In order to not subtract too much from the tokens, // In order to not subtract too much from the tokens,
// calculate what is actually needed to subtract. // calculate what is actually needed to subtract.
const toSubtract = (amount % bucket.refillAmount) || amount; const toSubtract = (amount % bucket.refillAmount) || amount;
bucket.tokensState -= toSubtract; bucket.tokensState -= toSubtract;
// Allow the next acquire to happen. // Allow the next acquire to happen.
bucket.allowAcquire = true; bucket.allowAcquire = true;
// If there is an acquire waiting, let it continue. // If there is an acquire waiting, let it continue.
bucket.waiting.shift()?.(); bucket.waiting.shift()?.();
} }
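A short usage sketch with made-up numbers: a bucket holding 120 tokens that refills fully every minute.

// Hypothetical limits: 120 tokens, fully refilled every 60 seconds.
const bucket = createLeakyBucket({
    max: 120,
    refillAmount: 120,
    refillInterval: 60000,
});

// Each call takes one token; acquire() resolves once a token is available.
await bucket.acquire(1);

// High-priority work is put at the front of the waiting queue.
await bucket.acquire(1, true);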


@@ -1,100 +1,100 @@
export class Collection<K, V> extends Map<K, V> {
    maxSize: number | undefined;

    constructor(entries?: (readonly (readonly [K, V])[] | null) | Map<K, V>, options?: CollectionOptions<K, V>) {
        super(entries ?? []);

        this.maxSize = options?.maxSize;
    }

    set(key: K, value: V) {
        // When this collection is maxSized make sure we can add first
        if ((this.maxSize || this.maxSize === 0) && this.size >= this.maxSize) {
            return this;
        }

        return super.set(key, value);
    }

    forceSet(key: K, value: V) {
        return super.set(key, value);
    }

    array() {
        return [...this.values()];
    }

    /** Retrieve the value of the first element in this collection */
    first(): V | undefined {
        return this.values().next().value;
    }

    last(): V | undefined {
        return [...this.values()][this.size - 1];
    }

    random(): V | undefined {
        const array = [...this.values()];
        return array[Math.floor(Math.random() * array.length)];
    }

    find(callback: (value: V, key: K) => boolean) {
        for (const key of this.keys()) {
            const value = this.get(key)!;
            if (callback(value, key)) return value;
        }
        // If nothing matched
        return;
    }

    filter(callback: (value: V, key: K) => boolean) {
        const relevant = new Collection<K, V>();
        this.forEach((value, key) => {
            if (callback(value, key)) relevant.set(key, value);
        });
        return relevant;
    }

    map<T>(callback: (value: V, key: K) => T) {
        const results = [];
        for (const key of this.keys()) {
            const value = this.get(key)!;
            results.push(callback(value, key));
        }
        return results;
    }

    some(callback: (value: V, key: K) => boolean) {
        for (const key of this.keys()) {
            const value = this.get(key)!;
            if (callback(value, key)) return true;
        }

        return false;
    }

    every(callback: (value: V, key: K) => boolean) {
        for (const key of this.keys()) {
            const value = this.get(key)!;
            if (!callback(value, key)) return false;
        }

        return true;
    }

    reduce<T>(callback: (accumulator: T, value: V, key: K) => T, initialValue?: T): T {
        let accumulator: T = initialValue!;

        for (const key of this.keys()) {
            const value = this.get(key)!;
            accumulator = callback(accumulator, value, key);
        }

        return accumulator;
    }
}

export interface CollectionOptions<K, V> {
    maxSize?: number;
}
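A quick sketch of how the maxSize cap behaves (keys and values here are arbitrary):

// Hypothetical cache capped at two entries.
const cache = new Collection<string, number>(null, { maxSize: 2 });

cache.set("a", 1);
cache.set("b", 2);
cache.set("c", 3); // silently ignored, the collection is already full

cache.size; // 2
cache.forceSet("c", 3); // bypasses the maxSize check
cache.map((value, key) => `${key}=${value}`); // ["a=1", "b=2", "c=3"]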


@@ -16,8 +16,8 @@ export const IMAGE_BASE_URL = "https://cdn.discordapp.com";
// This can be modified by big brain bots to use a proxy
export const baseEndpoints = {
    BASE_URL: `${BASE_URL}/v${API_VERSION}`,
    CDN_URL: IMAGE_BASE_URL,
};

export const SLASH_COMMANDS_NAME_REGEX = /^[-_\p{L}\p{N}\p{sc=Deva}\p{sc=Thai}]{1,32}$/u;
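For reference, a couple of illustrative checks against the name regex:

// Illustrative only.
SLASH_COMMANDS_NAME_REGEX.test("ban-user"); // true
SLASH_COMMANDS_NAME_REGEX.test("ban user"); // false, spaces are not allowed
SLASH_COMMANDS_NAME_REGEX.test("a".repeat(33)); // false, 32 characters is the limit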

10 vendor/util/delay.ts vendored

@@ -1,8 +1,8 @@
/** Pause the execution for a given amount of milliseconds. */
export function delay(ms: number): Promise<void> {
    return new Promise((res): number =>
        setTimeout((): void => {
            res();
        }, ms)
    );
}
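Typical usage is simply awaiting it (the duration below is arbitrary):

// Pause for roughly one second.
await delay(1000);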

14 vendor/util/token.ts vendored

@@ -1,14 +1,14 @@
/** Removes the "Bot " prefix from the token. */
export function removeTokenPrefix(token?: string, type: "GATEWAY" | "REST" = "REST"): string {
    // If no token is provided, throw an error
    if (!token) throw new Error(`The ${type} was not given a token. Please provide a token and try again.`);

    // If the token does not have a prefix just return the token
    if (!token.startsWith("Bot ")) return token;

    // Remove the prefix and return only the token.
    return token.substring(token.indexOf(" ") + 1);
}

/** Get the bot id from the bot token. WARNING: Discord staff has mentioned this may not be stable forever. Use at your own risk. However, note that for over 5 years this has never broken. */
export function getBotIdFromToken(token: string) {
    return BigInt(atob(token.split(".")[0]));
}
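A small sketch with a fabricated token (not a real credential): the first segment is the base64-encoded bot id, which is why getBotIdFromToken can decode it.

// Made-up token; "MTIzNDU2Nzg5MDEyMzQ1Njc4" is base64 for "123456789012345678".
const fakeToken = "Bot MTIzNDU2Nzg5MDEyMzQ1Njc4.xxxxxx.yyyyyy";

removeTokenPrefix(fakeToken); // "MTIzNDU2Nzg5MDEyMzQ1Njc4.xxxxxx.yyyyyy"
getBotIdFromToken(removeTokenPrefix(fakeToken)); // 123456789012345678n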