Mirror of https://github.com/tiramisulabs/seyfert.git — synced 2025-07-01 20:46:08 +00:00

commit 67970faf7b — fix: events
parent b1f8b0a5b5
@@ -1,2 +1,3 @@
# biscuit

A brand new bleeding edge non bloated Discord library
deno.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
    "fmt": {
        "options": {
            "indentWidth": 4,
            "lineWidth": 120
        }
    }
}
@@ -1,6 +1,9 @@
-import type {
-    DiscordGatewayPayload,
-    Shard,
-} from "../vendor/external.ts";
+import type { DiscordGatewayPayload, DiscordMessage, DiscordReady, Shard } from "../vendor/external.ts";

export type DiscordRawEventHandler = (shard: Shard, data: DiscordGatewayPayload) => unknown;

+export interface Events {
+    ready(payload: DiscordReady, shardId: number): unknown;
+    messageCreate(message: DiscordMessage): unknown;
+    raw(data: DiscordGatewayPayload, shardId: number): unknown;
+}
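Since listener shapes now live on the `Events` interface, a handler can be written against it directly and reused wherever the session expects that event. A small sketch (only the `./Events.ts` import is taken from the diff; the fields read from the payloads are standard gateway fields, not something this commit defines):

```ts
import type { Events } from "./Events.ts";

// Handlers declared against the new Events interface get their parameter
// types for free and fail to compile if the signature drifts.
const onReady: Events["ready"] = (payload, shardId) => {
    console.log("shard", shardId, "ready; session id:", payload.session_id);
};

const onMessage: Events["messageCreate"] = (message) => {
    console.log(message.content);
};
```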
@@ -1,129 +1,128 @@
import type {
-    GatewayIntents,
-    DiscordGatewayPayload,
-    DiscordGetGatewayBot,
-    DiscordReady,
-    DiscordMessage,
-    GatewayDispatchEventNames,
-    GatewayBot,
-    Shard
+    DiscordGatewayPayload,
+    DiscordGetGatewayBot,
+    DiscordMessage,
+    DiscordReady,
+    GatewayBot,
+    GatewayDispatchEventNames,
+    GatewayIntents,
+    Shard,
} from "../vendor/external.ts";

-import {
-    EventEmitter,
-    Snowflake,
-    Routes
-} from "../util/mod.ts";
+import { EventEmitter, Routes, Snowflake } from "../util/mod.ts";

-import type {
-    DiscordRawEventHandler,
-} from "./Events.ts";
+import type { DiscordRawEventHandler, Events } from "./Events.ts";

-import {
-    createRestManager,
-    createGatewayManager
-} from "../vendor/external.ts";
+import { createGatewayManager, createRestManager } from "../vendor/external.ts";

export interface RestOptions {
    secretKey?: string;
    applicationId?: Snowflake;
}

export interface GatewayOptions {
    botId?: Snowflake;
    data?: GatewayBot;
}

export interface SessionOptions {
    token: string;
    rawHandler?: DiscordRawEventHandler;
    intents?: GatewayIntents;
    rest?: RestOptions;
    gateway?: GatewayOptions;
}

/**
 * Receives a Token, connects
- * */
+ */
export class Session extends EventEmitter {
    options: SessionOptions;

    // TODO: improve this with CreateShardManager etc
-    rest: ReturnType<typeof createRestManager>;
-    gateway: ReturnType<typeof createGatewayManager>;
+    rest?: ReturnType<typeof createRestManager>;
+    gateway?: ReturnType<typeof createGatewayManager>;

    constructor(options: SessionOptions) {
        super();
        this.options = options;
+        // TODO: set botId in Session.botId or something
+    }

-        const defHandler: DiscordRawEventHandler = (shard, data) => {
-            this.emit("raw", data, shard.id);
-
-            if (!data.t) return;
-
-            this.emit(data.t as GatewayDispatchEventNames, data, shard.id);
-        };
-
-        this.rest = createRestManager({
-            token: this.options.token,
-            debug: (text) => {
-                // TODO: set this using the event emitter
-                super.rawListeners("debug")?.forEach((fn) => fn(text));
-            },
-            secretKey: this.options.rest?.secretKey ?? undefined
-        });
-
-        this.gateway = createGatewayManager({
-            gatewayBot: options.gateway?.data ?? {} as GatewayBot, // TODO
-            gatewayConfig: {
-                token: options.token,
-                intents: options.intents
-            },
-            handleDiscordPayload: options.rawHandler ?? defHandler
-        });
-
-        // TODO: set botId in Session.botId or something
+    /** TODO: move this */
+    static #toSnakeCase(str: string) {
+        // probably not a fast implementation
+        return str.replace(/[A-Z]/g, (char) => "_" + char.toLowerCase());
    }

-    override on(event: "ready", func: (payload: DiscordReady) => unknown): this;
-    override on(event: "raw", func: (shard: Shard, data: DiscordGatewayPayload) => unknown): this;
-    override on(event: "message", func: (message: DiscordMessage) => unknown): this;
-    override on(event: "debug", func: (text: string) => unknown): this;
-    override on(event: string, func: Function): this {
+    override on(event: "ready", func: Events["ready"]): this;
+    override on(event: "messageCreate", func: Events["messageCreate"]): this;
+    override on(event: "raw", func: Events["raw"]): this;
+    override on(event: keyof Events, func: Events[keyof Events]): this {
        return super.on(event, func);
    }

-    override off(event: string, func: Function): this {
+    override off(event: "ready", func: Events["ready"]): this;
+    override off(event: "messageCreate", func: Events["messageCreate"]): this;
+    override off(event: "raw", func: Events["raw"]): this;
+    override off(event: keyof Events, func: Events[keyof Events]): this {
        return super.off(event, func);
    }

-    override once(event: string, func: Function): this {
+    override once(event: "ready", func: Events["ready"]): this;
+    override once(event: "messageCreate", func: Events["messageCreate"]): this;
+    override once(event: "raw", func: Events["raw"]): this;
+    override once(event: keyof Events, func: Events[keyof Events]): this {
        return super.once(event, func);
    }

    async start() {
-        const getGatewayBot = () => this.rest.runMethod<DiscordGetGatewayBot>(this.rest, "GET", Routes.GATEWAY_BOT());
+        const defHandler: DiscordRawEventHandler = (shard, data) => {
+            this.emit("raw", data, shard.id);
+
+            if (!data.t) return;
+
+            this.emit(data.t as GatewayDispatchEventNames, data, shard.id);
+        };
+
+        this.rest = createRestManager({
+            token: this.options.token,
+            debug: (text) => {
+                // TODO: set this using the event emitter
+                super.rawListeners("debug")?.forEach((fn) => fn(text));
+            },
+            secretKey: this.options.rest?.secretKey ?? undefined,
+        });
+
+        this.gateway = createGatewayManager({
+            gatewayBot: this.options.gateway?.data ?? {} as GatewayBot, // TODO
+            gatewayConfig: {
+                token: this.options.token,
+                intents: this.options.intents,
+            },
+            handleDiscordPayload: this.options.rawHandler ?? defHandler,
+        });
+
+        const getGatewayBot = () => this.rest!.runMethod<DiscordGetGatewayBot>(this.rest!, "GET", Routes.GATEWAY_BOT());

        // check if is empty
        if (!Object.keys(this.options.gateway?.data ?? {}).length) {
            const nonParsed = await getGatewayBot();

            this.gateway.gatewayBot = {
                url: nonParsed.url,
                shards: nonParsed.shards,
                sessionStartLimit: {
                    total: nonParsed.session_start_limit.total,
                    remaining: nonParsed.session_start_limit.remaining,
                    resetAfter: nonParsed.session_start_limit.reset_after,
                    maxConcurrency: nonParsed.session_start_limit.max_concurrency,
                },
            };
            this.gateway.lastShardId = this.gateway.gatewayBot.shards - 1;
            this.gateway.manager.totalShards = this.gateway.gatewayBot.shards;
        }

        this.gateway.spawnShards();
    }
}
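With this change, `rest` and `gateway` no longer exist until `start()` builds them, and the `on`/`off`/`once` overloads only accept names from `Events`. A minimal lifecycle sketch, assuming the same `./deps.ts` entry point the test file below uses and a token passed in `Deno.args`:

```ts
import * as Discord from "./deps.ts"; // same entry point the test file below uses

const session = new Discord.Session({ token: Deno.args[0] });

// Listener names and signatures are now checked against the Events interface.
session.on("messageCreate", (message) => console.log(message.content));
session.on("ready", (_payload, shardId) => console.log("shard", shardId, "is ready"));

// rest and gateway stay undefined until start() creates them and spawns the shards.
await session.start();
```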
tests/mod.ts (14 lines changed)
@@ -1,11 +1,19 @@
import * as Discord from "./deps.ts";

if (!Deno.args[0]) {
    throw new Error("Please provide a token");
}

const session = new Discord.Session({
    token: Deno.args[0],
    intents: Discord.GatewayIntents.MessageContent | Discord.GatewayIntents.Guilds |
        Discord.GatewayIntents.GuildMessages,
});

session.on("ready", (payload) => console.log(payload));
session.on("raw", (shard, data) => console.log(shard, data));
session.on("debug", (text) => console.log(text));
session.on("message", (payload) => console.log(payload));
// session.on("raw", (data, shardId) => console.log(shardId, data));

console.log("hello");

session.start();
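The `intents` option is a plain bit field: every `GatewayIntents` member is one bit and `|` merges them. The numeric values below come from Discord's gateway documentation rather than from this diff, and are only here to show what the enum expression evaluates to:

```ts
// Same mask the test builds with the enum, written out numerically for illustration:
// Guilds = 1 << 0, GuildMessages = 1 << 9, MessageContent = 1 << 15.
const intents = (1 << 0) | (1 << 9) | (1 << 15);
console.log(intents); // 33281
console.log(intents & (1 << 9) ? "guild messages enabled" : "disabled"); // guild messages enabled
```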
@@ -1,77 +1,74 @@
// deno-lint-ignore-file ban-types

/**
 * An event emitter (observer pattern)
- * */
+ */
export class EventEmitter {
-    listeners = new Map<PropertyKey, Function[]>;
+    listeners = new Map<PropertyKey, Function[]>();

    #addListener(event: string, func: Function) {
        this.listeners.set(event, this.listeners.get(event) || []);
        this.listeners.get(event)?.push(func);
        return this;
    }

    on(event: string, func: Function) {
        return this.#addListener(event, func);
    }

    #removeListener(event: string, func: Function) {
        if (this.listeners.has(event)) {
            const listener = this.listeners.get(event);

            if (listener?.includes(func)) {
                listener.splice(listener.indexOf(func), 1);

                if (listener.length === 0) {
                    this.listeners.delete(event);
                }
            }
        }

        return this;
    }

    off(event: string, func: Function) {
        return this.#removeListener(event, func);
    }

    once(event: string, func: Function) {
        // it is important for this to be an arrow function
        const closure = () => {
            func();
            this.off(event, func);
-        }
+        };

        const listener = this.listeners.get(event) ?? [];

        listener.push(closure);

        return this;
    }

    emit(event: string, ...args: unknown[]) {
        const listener = this.listeners.get(event);

        if (!listener) {
            return false;
        }

        listener.forEach((f) => f(...args));

        return true;
    }

    listenerCount(eventName: string) {
        return this.listeners.get(eventName)?.length ?? 0;
    }

    rawListeners(eventName: string): Function[] | undefined {
        return this.listeners.get(eventName);
    }
}

export default EventEmitter;
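The emitter is just a `Map` of listener arrays. A short, self-contained sketch of the methods this commit relies on (`on`, `emit`, `off`, `listenerCount`):

```ts
const emitter = new EventEmitter();

const onDebug = (text: string) => console.log("[debug]", text);
emitter.on("debug", onDebug);

emitter.emit("debug", "hello");              // true — one listener ran
console.log(emitter.listenerCount("debug")); // 1

emitter.off("debug", onDebug);
console.log(emitter.emit("debug", "bye"));   // false — no listeners left
```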
@@ -1,3 +1,3 @@
export function GATEWAY_BOT() {
    return "/gateway/bot";
}
@@ -5,7 +5,7 @@ export const DiscordEpoch = 14200704e5;

// utilities for Snowflakes
export const Snowflake = {
    snowflakeToTimestamp(id: Snowflake) {
        return (Number(id) >> 22) + DiscordEpoch;
-    }
-}
+    },
+};
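For context, a snowflake keeps its creation time (milliseconds since the Discord epoch, 2015-01-01) in the bits above bit 22. Real ids overflow JavaScript's 32-bit `>>` operator, so the sketch below does the same computation with `BigInt`; the example id and date are the ones from Discord's documentation, not from this repository:

```ts
const DiscordEpoch = 14200704e5; // 1420070400000 ms = 2015-01-01T00:00:00.000Z

// Worked example with a real-sized id; BigInt avoids the 32-bit limit of `>>` on numbers.
const id = 175928847299117063n;
const timestamp = Number(id >> 22n) + DiscordEpoch;
console.log(new Date(timestamp).toISOString()); // 2016-04-30T11:18:25.796Z
```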
vendor/gateway/README.md (vendored, 2 lines changed)
@@ -52,7 +52,7 @@ This WS service is meant for ADVANCED DEVELOPERS ONLY!

```ts
createGatewayManager({
    // TODO: (docs) Fill this out
});
```
vendor/gateway/calculateShardId.ts (vendored, 4 lines changed)
@@ -1,7 +1,7 @@
import { GatewayManager } from "./manager/gatewayManager.ts";

export function calculateShardId(gateway: GatewayManager, guildId: bigint) {
    if (gateway.manager.totalShards === 1) return 0;

    return Number((guildId >> 22n) % BigInt(gateway.manager.totalShards - 1));
}
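The helper routes a guild to a shard from the upper bits of its id; Discord's documented formula is `(guild_id >> 22) % num_shards`, while this vendored copy reduces modulo `totalShards - 1`. A trace of the function as written, with made-up numbers:

```ts
// Hypothetical manager with only the field the helper reads.
const gateway = { manager: { totalShards: 3 } };

const guildId = 419430400n;  // == 100n << 22n, i.e. the "timestamp" part is 100
const high = guildId >> 22n; // 100n
const shardId = Number(high % BigInt(gateway.manager.totalShards - 1)); // 100 % 2
console.log(shardId); // 0
```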
vendor/gateway/manager/calculateTotalShards.ts (vendored, 20 lines changed)
@@ -2,15 +2,15 @@ import { GatewayManager } from "./gatewayManager.ts";

/** Handler used to determine max number of shards to use based upon the max concurrency. */
export function calculateTotalShards(gateway: GatewayManager): number {
    // Bots under 100k servers do not have access to total shards.
    if (gateway.manager.totalShards < 100) return gateway.manager.totalShards;

    // Calculate a multiple of `maxConcurrency` which can be used to connect to the gateway.
    return Math.ceil(
        gateway.manager.totalShards /
            // If `maxConcurrency` is 1 we can safely use 16.
            (gateway.gatewayBot.sessionStartLimit.maxConcurrency === 1
                ? 16
                : gateway.gatewayBot.sessionStartLimit.maxConcurrency),
    ) * gateway.gatewayBot.sessionStartLimit.maxConcurrency;
}
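Numerically, the function rounds the shard count up to the next multiple of the max concurrency once a bot is large enough to need it. With illustrative values:

```ts
// Illustrative numbers only: 150 recommended shards, max_concurrency of 16.
const totalShards = 150;
const maxConcurrency = 16;

const used = Math.ceil(totalShards / maxConcurrency) * maxConcurrency;
console.log(used); // 160 — rounded up to the next multiple of maxConcurrency
```

With `maxConcurrency === 1` the code divides by 16 instead but still multiplies by 1, so the same input would evaluate to `ceil(150 / 16) * 1 = 10`.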
vendor/gateway/manager/calculateWorkerId.ts (vendored, 16 lines changed)
@@ -1,13 +1,13 @@
import { GatewayManager } from "./gatewayManager.ts";

export function calculateWorkerId(manager: GatewayManager, shardId: number) {
    // Ignore decimal numbers.
    let workerId = Math.floor((shardId) / manager.shardsPerWorker);
    // If the workerId overflows the maximal allowed workers we by default just use to last worker.
    if (workerId >= manager.totalWorkers) {
        // The Id of the last available worker is total -1
        workerId = manager.totalWorkers - 1;
    }

    return workerId;
}
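With the defaults the gateway manager uses below (`shardsPerWorker: 25`, `totalWorkers: 4`), the mapping works out as follows; the helper here is just the same arithmetic condensed for illustration:

```ts
const shardsPerWorker = 25;
const totalWorkers = 4;

const workerFor = (shardId: number) =>
    Math.min(Math.floor(shardId / shardsPerWorker), totalWorkers - 1);

console.log(workerFor(0));   // 0
console.log(workerFor(60));  // 2
console.log(workerFor(130)); // 3 — would be worker 5, clamped to the last worker
```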
vendor/gateway/manager/gatewayManager.ts (vendored, 474 lines changed)
@@ -27,268 +27,268 @@ export type GatewayManager = ReturnType<typeof createGatewayManager>;
 * bots.
 */
export function createGatewayManager(
    options: PickPartial<CreateGatewayManager, "handleDiscordPayload" | "gatewayBot" | "gatewayConfig">,
) {
    const prepareBucketsOverwritten = options.prepareBuckets ?? prepareBuckets;
    const spawnShardsOverwritten = options.spawnShards ?? spawnShards;
    const stopOverwritten = options.stop ?? stop;
    const tellWorkerToIdentifyOverwritten = options.tellWorkerToIdentify ?? tellWorkerToIdentify;
    const calculateTotalShardsOverwritten = options.calculateTotalShards ?? calculateTotalShards;
    const calculateWorkerIdOverwritten = options.calculateWorkerId ?? calculateWorkerId;

    const totalShards = options.totalShards ?? options.gatewayBot.shards ?? 1;

    const gatewayManager = {
        // ----------
        // PROPERTIES
        // ----------

        /** The max concurrency buckets.
         * Those will be created when the `spawnShards` (which calls `prepareBuckets` under the hood) function gets called.
         */
        buckets: new Map<
            number,
            {
                workers: { id: number; queue: number[] }[];
                leak: LeakyBucket;
            }
        >(),
        /** Id of the first Shard which should get controlled by this manager.
         *
         * NOTE: This is intended for testing purposes
         * if big bots want to test the gateway on smaller scale.
         * This is not recommended to be used in production.
         */
        firstShardId: options.firstShardId ?? 0,
        /** Important data which is used by the manager to connect shards to the gateway. */
        gatewayBot: options.gatewayBot,
        /** Id of the last Shard which should get controlled by this manager.
         *
         * NOTE: This is intended for testing purposes
         * if big bots want to test the gateway on smaller scale.
         * This is not recommended to be used in production.
         */
        lastShardId: options.lastShardId ?? totalShards - 1 ?? 1,
        /** This is where the Shards get stored.
         * This will not be used when having a custom workers solution.
         */
        manager: {} as ShardManager,
        /** Delay in milliseconds to wait before spawning next shard.
         * OPTIMAL IS ABOVE 5100. YOU DON'T WANT TO HIT THE RATE LIMIT!!!
         */
        spawnShardDelay: options.spawnShardDelay ?? 5300,
        /** How many Shards should get assigned to a Worker.
         *
         * IMPORTANT: Discordeno will NOT spawn Workers for you.
         * Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
         * Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
         *
         * NOTE: The last Worker will IGNORE this value,
         * which means that the last worker can get assigned an unlimited amount of shards.
         * This is not a bug but intended behavior and means you have to assign more workers to this manager.
         */
        shardsPerWorker: options.shardsPerWorker ?? 25,
        /** The total amount of Workers which get controlled by this manager.
         *
         * IMPORTANT: Discordeno will NOT spawn Workers for you.
         * Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
         * Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
         */
        totalWorkers: options.totalWorkers ?? 4,

        // ----------
        // PROPERTIES
        // ----------
        /** Prepares the buckets for identifying.
         *
         * NOTE: Most of the time this function does not need to be called,
         * since it gets called by the `spawnShards` function indirectly.
         */
        prepareBuckets: function () {
            return prepareBucketsOverwritten(this);
        },
        /** This function starts to spawn the Shards assigned to this manager.
         *
         * The managers `buckets` will be created and
         *
         * if `resharding.useOptimalLargeBotSharding` is set to true,
         * `totalShards` gets double checked and adjusted accordingly if wrong.
         */
        spawnShards: function () {
            return spawnShardsOverwritten(this);
        },
        /** Stop the gateway. This closes all shards. */
        stop: function (code: number, reason: string) {
            return stopOverwritten(this, code, reason);
        },
        /** Tell the Worker with this Id to identify this Shard.
         *
         * Useful if a custom Worker solution should be used.
         *
         * IMPORTANT: Discordeno will NOT spawn Workers for you.
         * Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
         * Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
         */
        tellWorkerToIdentify: function (workerId: number, shardId: number, bucketId: number) {
            return tellWorkerToIdentifyOverwritten(this, workerId, shardId, bucketId);
        },
        // TODO: fix debug
        /** Handle the different logs. Used for debugging. */
        debug: options.debug || function () {},

        // /** The methods related to resharding. */
        // resharding: {
        //     /** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
        //     useOptimalLargeBotSharding: options.resharding?.useOptimalLargeBotSharding ?? true,
        //     /** Whether or not to automatically reshard.
        //      *
        //      * @default true
        //      */
        //     reshard: options.resharding?.reshard ?? true,
        //     /** The percentage at which resharding should occur.
        //      *
        //      * @default 80
        //      */
        //     reshardPercentage: options.resharding?.reshardPercentage ?? 80,
        //     /** Handles resharding the bot when necessary. */
        //     resharder: options.resharding?.resharder ?? resharder,
        //     /** Handles checking if all new shards are online in the new gateway. */
        //     isPending: options.resharding?.isPending ?? resharderIsPending,
        //     /** Handles closing all shards in the old gateway. */
        //     closeOldShards: options.resharding?.closeOldShards ?? resharderCloseOldShards,
        //     /** Handles checking if it is time to reshard and triggers the resharder. */
        //     check: options.resharding?.check ?? startReshardingChecks,
        //     /** Handler to mark a guild id with its new shard id in cache. */
        //     markNewGuildShardId: options.resharding?.markNewGuildShardId ?? markNewGuildShardId,
        //     /** Handler to update all guilds in cache with the new shard id. */
        //     editGuildShardIds: options.resharding?.editGuildShardIds ?? reshardingEditGuildShardIds,
        // },

        /** Calculate the amount of Shards which should be used based on the bot's max concurrency. */
        calculateTotalShards: function () {
            return calculateTotalShardsOverwritten(this);
        },

        /** Calculate the Id of the Worker related to this Shard. */
        calculateWorkerId: function (shardId: number) {
            return calculateWorkerIdOverwritten(this, shardId);
        },
    };

    gatewayManager.manager = createShardManager({
        createShardOptions: options.createShardOptions,
        gatewayConfig: options.gatewayConfig,
        shardIds: [],
        totalShards,

        handleMessage: function (shard, message) {
            return options.handleDiscordPayload(shard, message);
        },

        requestIdentify: async (shardId) => {
            // TODO: improve
            await gatewayManager.buckets.get(shardId % gatewayManager.gatewayBot.sessionStartLimit.maxConcurrency)!.leak
                .acquire(1);
        },
    });

    return gatewayManager;
}

export interface CreateGatewayManager {
    /** Delay in milliseconds to wait before spawning next shard. OPTIMAL IS ABOVE 5100. YOU DON'T WANT TO HIT THE RATE LIMIT!!! */
    spawnShardDelay: number;
    /** Total amount of shards your bot uses. Useful for zero-downtime updates or resharding. */
    totalShards: number;
    /** The amount of shards to load per worker. */
    shardsPerWorker: number;
    /** The total amount of workers to use for your bot. */
    totalWorkers: number;
    /** Id of the first Shard which should get controlled by this manager.
     *
     * NOTE: This is intended for testing purposes
     * if big bots want to test the gateway on smaller scale.
     * This is not recommended to be used in production.
     */
    firstShardId: number;
    /** Id of the last Shard which should get controlled by this manager.
     *
     * NOTE: This is intended for testing purposes
     * if big bots want to test the gateway on smaller scale.
     * This is not recommended to be used in production.
     */
    lastShardId: number;

    /** Important data which is used by the manager to connect shards to the gateway. */
    gatewayBot: GatewayBot;

    gatewayConfig: PickPartial<ShardGatewayConfig, "token">;

    /** Options which are used to create a new shard. */
    createShardOptions?: Omit<CreateShard, "id" | "totalShards" | "requestIdentify" | "gatewayConfig">;

    /** Stored as bucketId: { workers: [workerId, [ShardIds]], createNextShard: boolean } */
    buckets: Map<
        number,
        {
            workers: { id: number; queue: number[] }[];
            leak: LeakyBucket;
        }
    >;
    // METHODS

    /** Prepares the buckets for identifying */
    prepareBuckets: typeof prepareBuckets;
    /** The handler for spawning ALL the shards. */
    spawnShards: typeof spawnShards;
    /** The handler to close all shards. */
    stop: typeof stop;
    /** Sends the discord payload to another server. */
    handleDiscordPayload: (shard: Shard, data: DiscordGatewayPayload) => any;
    /** Tell the worker to begin identifying this shard */
    tellWorkerToIdentify: typeof tellWorkerToIdentify;
    /** Handle the different logs. Used for debugging. */
    debug: (text: GatewayDebugEvents, ...args: any[]) => unknown;
    /** The methods related to resharding. */
    // resharding: {
    //     /** Whether the resharder should automatically switch to LARGE BOT SHARDING when you are above 100K servers. */
    //     useOptimalLargeBotSharding: boolean;
    //     /** Whether or not to automatically reshard. */
    //     reshard: boolean;
    //     /** The percentage at which resharding should occur. */
    //     reshardPercentage: number;
    //     /** Handles resharding the bot when necessary. */
    //     resharder: typeof resharder;
    //     /** Handles checking if all new shards are online in the new gateway. */
    //     isPending: typeof resharderIsPending;
    //     /** Handles closing all shards in the old gateway. */
    //     closeOldShards: typeof resharderCloseOldShards;
    //     /** Handler to mark a guild id with its new shard id in cache. */
    //     markNewGuildShardId: typeof markNewGuildShardId;
    //     /** Handler to update all guilds in cache with the new shard id. */
    //     editGuildShardIds: typeof reshardingEditGuildShardIds;
    // };
    /** Calculates the number of shards to use based on the max concurrency */
    calculateTotalShards: typeof calculateTotalShards;

    /** Calculate the id of the worker related ot this Shard. */
    calculateWorkerId: typeof calculateWorkerId;
}

export type GatewayDebugEvents =
    | "GW ERROR"
    | "GW CLOSED"
    | "GW CLOSED_RECONNECT"
    | "GW RAW"
    | "GW RECONNECT"
    | "GW INVALID_SESSION"
    | "GW RESUMED"
    | "GW RESUMING"
    | "GW IDENTIFYING"
    | "GW RAW_SEND"
    | "GW MAX REQUESTS"
    | "GW DEBUG"
    | "GW HEARTBEATING"
    | "GW HEARTBEATING_STARTED"
    | "GW HEARTBEATING_DETAILS"
    | "GW HEARTBEATING_CLOSED";
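Only `gatewayBot`, `gatewayConfig`, and `handleDiscordPayload` are required thanks to the `PickPartial`, which is how `Session.start()` above calls it. A stand-alone sketch with placeholder gateway data (the values are illustrative, not real limits):

```ts
import { createGatewayManager } from "../vendor/external.ts"; // same re-export Session.ts uses

const gateway = createGatewayManager({
    // Placeholder connection data; Session.start() fills this from GET /gateway/bot instead.
    gatewayBot: {
        url: "wss://gateway.discord.gg",
        shards: 1,
        sessionStartLimit: { total: 1000, remaining: 1000, resetAfter: 0, maxConcurrency: 1 },
    },
    gatewayConfig: { token: "YOUR_BOT_TOKEN", intents: 0 },
    handleDiscordPayload(shard, payload) {
        // Every event from every shard funnels through this single callback.
        console.log(shard.id, payload.t);
    },
});

gateway.spawnShards();
```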
vendor/gateway/manager/prepareBuckets.ts (vendored, 76 lines changed)
@@ -2,46 +2,46 @@ import { createLeakyBucket } from "../../util/bucket.ts";
import { GatewayManager } from "./gatewayManager.ts";

export function prepareBuckets(gateway: GatewayManager) {
    for (let i = 0; i < gateway.gatewayBot.sessionStartLimit.maxConcurrency; ++i) {
        gateway.buckets.set(i, {
            workers: [],
            leak: createLeakyBucket({
                max: 1,
                refillAmount: 1,
                // special number which is proven to be working dont change
                refillInterval: gateway.spawnShardDelay,
            }),
        });
    }

    // ORGANIZE ALL SHARDS INTO THEIR OWN BUCKETS
    for (let shardId = gateway.firstShardId; shardId <= gateway.lastShardId; ++shardId) {
        // gateway.debug("GW DEBUG", `1. Running for loop in spawnShards function for shardId ${i}.`);
        if (shardId >= gateway.manager.totalShards) {
            throw new Error(
                `Shard (id: ${shardId}) is bigger or equal to the used amount of used shards which is ${gateway.manager.totalShards}`,
            );
        }

        const bucketId = shardId % gateway.gatewayBot.sessionStartLimit.maxConcurrency;
        const bucket = gateway.buckets.get(bucketId);
        if (!bucket) {
            throw new Error(
                `Shard (id: ${shardId}) got assigned to an illegal bucket id: ${bucketId}, expected a bucket id between 0 and ${
                    gateway.gatewayBot.sessionStartLimit.maxConcurrency - 1
                }`,
            );
        }

        // FIND A QUEUE IN THIS BUCKET THAT HAS SPACE
        // const worker = bucket.workers.find((w) => w.queue.length < gateway.shardsPerWorker);
        const workerId = gateway.calculateWorkerId(shardId);
        const worker = bucket.workers.find((w) => w.id === workerId);
        if (worker) {
            // IF THE QUEUE HAS SPACE JUST ADD IT TO THIS QUEUE
            worker.queue.push(shardId);
        } else {
            bucket.workers.push({ id: workerId, queue: [shardId] });
        }
    }
}
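Bucket membership is pure arithmetic: shard `n` always lands in bucket `n % maxConcurrency`, and each bucket's leaky bucket releases one identify per `spawnShardDelay`. A tiny illustration of the grouping, independent of any gateway object:

```ts
const maxConcurrency = 2;
const shards = [0, 1, 2, 3, 4, 5];

// bucket 0 gets the even shard ids, bucket 1 the odd ones
const byBucket = new Map<number, number[]>();
for (const shardId of shards) {
    const bucketId = shardId % maxConcurrency;
    byBucket.set(bucketId, [...(byBucket.get(bucketId) ?? []), shardId]);
}
console.log(byBucket); // Map { 0 => [0, 2, 4], 1 => [1, 3, 5] }
```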
292
vendor/gateway/manager/resharder.ts
vendored
292
vendor/gateway/manager/resharder.ts
vendored
@ -4,56 +4,56 @@ import { createGatewayManager, GatewayManager } from "./gatewayManager.ts";
|
||||
export type Resharder = ReturnType<typeof activateResharder>;
|
||||
|
||||
export function activateResharder(options: ActivateResharderOptions) {
|
||||
const resharder = {
|
||||
// ----------
|
||||
// PROPERTIES
|
||||
// ----------
|
||||
const resharder = {
|
||||
// ----------
|
||||
// PROPERTIES
|
||||
// ----------
|
||||
|
||||
/** Interval in milliseconds of when to check whether it's time to reshard.
|
||||
*
|
||||
* @default 28800000 (8 hours)
|
||||
*/
|
||||
checkInterval: options.checkInterval || 28800000,
|
||||
/** Interval in milliseconds of when to check whether it's time to reshard.
|
||||
*
|
||||
* @default 28800000 (8 hours)
|
||||
*/
|
||||
checkInterval: options.checkInterval || 28800000,
|
||||
|
||||
/** Gateway manager which is currently processing all shards and events. */
|
||||
gateway: options.gatewayManager,
|
||||
/** Gateway manager which is currently processing all shards and events. */
|
||||
gateway: options.gatewayManager,
|
||||
|
||||
/** Timeout of the reshard checker. */
|
||||
intervalId: undefined as number | undefined,
|
||||
/** Timeout of the reshard checker. */
|
||||
intervalId: undefined as number | undefined,
|
||||
|
||||
/** Percentage at which resharding should occur.
|
||||
* @default 80
|
||||
*/
|
||||
percentage: options.percentage ?? 80,
|
||||
/** Percentage at which resharding should occur.
|
||||
* @default 80
|
||||
*/
|
||||
percentage: options.percentage ?? 80,
|
||||
|
||||
/** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
|
||||
useOptimalLargeBotSharding: options.useOptimalLargeBotSharding ?? true,
|
||||
/** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
|
||||
useOptimalLargeBotSharding: options.useOptimalLargeBotSharding ?? true,
|
||||
|
||||
// ----------
|
||||
// METHODS
|
||||
// ----------
|
||||
// ----------
|
||||
// METHODS
|
||||
// ----------
|
||||
|
||||
/** Activate the resharder and delay the next reshard check. */
|
||||
activate: function () {
|
||||
return activate(this);
|
||||
},
|
||||
/** Activate the resharder and delay the next reshard check. */
|
||||
activate: function () {
|
||||
return activate(this);
|
||||
},
|
||||
|
||||
/** Function which is used to fetch the current gateway information of the bot.
|
||||
* This function is mainly used by the reshard checker.
|
||||
*/
|
||||
getGatewayBot: options.getGatewayBot,
|
||||
/** Function which is used to fetch the current gateway information of the bot.
|
||||
* This function is mainly used by the reshard checker.
|
||||
*/
|
||||
getGatewayBot: options.getGatewayBot,
|
||||
|
||||
/** Reshard the bots gateway. */
|
||||
reshard: function (gatewayBot: GatewayBot) {
|
||||
return reshard(this, gatewayBot);
|
||||
},
|
||||
/** Reshard the bots gateway. */
|
||||
reshard: function (gatewayBot: GatewayBot) {
|
||||
return reshard(this, gatewayBot);
|
||||
},
|
||||
|
||||
tellWorkerToPrepare: options.tellWorkerToPrepare,
|
||||
};
|
||||
tellWorkerToPrepare: options.tellWorkerToPrepare,
|
||||
};
|
||||
|
||||
resharder.activate();
|
||||
resharder.activate();
|
||||
|
||||
return resharder;
|
||||
return resharder;
|
||||
}
|
||||
|
||||
// /** The methods related to resharding. */
|
||||
@ -85,106 +85,106 @@ export function activateResharder(options: ActivateResharderOptions) {
|
||||
// },
|
||||
|
||||
export interface ActivateResharderOptions {
|
||||
/** Interval in milliseconds of when to check whether it's time to reshard.
|
||||
*
|
||||
* @default 28800000 (8 hours)
|
||||
*/
|
||||
checkInterval?: number;
|
||||
/** Gateway manager which the resharder should be bound to. */
|
||||
gatewayManager: GatewayManager;
|
||||
/** Percentage at which resharding should occur.
|
||||
* @default 80
|
||||
*/
|
||||
percentage?: number;
|
||||
/** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
|
||||
useOptimalLargeBotSharding?: boolean;
|
||||
/** Interval in milliseconds of when to check whether it's time to reshard.
|
||||
*
|
||||
* @default 28800000 (8 hours)
|
||||
*/
|
||||
checkInterval?: number;
|
||||
/** Gateway manager which the resharder should be bound to. */
|
||||
gatewayManager: GatewayManager;
|
||||
/** Percentage at which resharding should occur.
|
||||
* @default 80
|
||||
*/
|
||||
percentage?: number;
|
||||
/** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
|
||||
useOptimalLargeBotSharding?: boolean;
|
||||
|
||||
/** Function which can be used to fetch the current gateway information of the bot.
|
||||
* This function is mainly used by the reshard checker.
|
||||
*/
|
||||
getGatewayBot(): Promise<GatewayBot>;
|
||||
/** Function which can be used to fetch the current gateway information of the bot.
|
||||
* This function is mainly used by the reshard checker.
|
||||
*/
|
||||
getGatewayBot(): Promise<GatewayBot>;
|
||||
|
||||
/** Function which is used to tell a Worker that it should identify a resharder Shard to the gateway and wait for further instructions.
|
||||
* The worker should **NOT** process any events coming from this Shard.
|
||||
*/
|
||||
tellWorkerToPrepare(
|
||||
gatewayManager: GatewayManager,
|
||||
workerId: number,
|
||||
shardId: number,
|
||||
bucketId: number,
|
||||
): Promise<void>;
|
||||
/** Function which is used to tell a Worker that it should identify a resharder Shard to the gateway and wait for further instructions.
|
||||
* The worker should **NOT** process any events coming from this Shard.
|
||||
*/
|
||||
tellWorkerToPrepare(
|
||||
gatewayManager: GatewayManager,
|
||||
workerId: number,
|
||||
shardId: number,
|
||||
bucketId: number,
|
||||
): Promise<void>;
|
||||
}
|
||||
|
||||
/** Handler that by default will check to see if resharding should occur. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
|
||||
export function activate(resharder: Resharder): void {
|
||||
if (resharder.intervalId !== undefined) {
|
||||
throw new Error("[RESHARDER] Cannot activate the resharder more than one time.");
|
||||
}
|
||||
if (resharder.intervalId !== undefined) {
|
||||
throw new Error("[RESHARDER] Cannot activate the resharder more than one time.");
|
||||
}
|
||||
|
||||
resharder.intervalId = setInterval(async () => {
|
||||
// gateway.debug("GW DEBUG", "[Resharding] Checking if resharding is needed.");
|
||||
resharder.intervalId = setInterval(async () => {
|
||||
// gateway.debug("GW DEBUG", "[Resharding] Checking if resharding is needed.");
|
||||
|
||||
// TODO: is it possible to route this to REST?
|
||||
const result = await resharder.getGatewayBot();
|
||||
// TODO: is it possible to route this to REST?
|
||||
const result = await resharder.getGatewayBot();
|
||||
|
||||
const percentage =
|
||||
((result.shards - resharder.gateway.manager.totalShards) / resharder.gateway.manager.totalShards) * 100;
|
||||
// Less than necessary% being used so do nothing
|
||||
if (percentage < resharder.percentage) return;
|
||||
const percentage =
|
||||
((result.shards - resharder.gateway.manager.totalShards) / resharder.gateway.manager.totalShards) * 100;
|
||||
// Less than necessary% being used so do nothing
|
||||
if (percentage < resharder.percentage) return;
|
||||
|
||||
// Don't have enough identify rate limits to reshard
|
||||
if (result.sessionStartLimit.remaining < result.shards) return;
|
||||
// Don't have enough identify rate limits to reshard
|
||||
if (result.sessionStartLimit.remaining < result.shards) return;
|
||||
|
||||
// MULTI-SERVER BOTS OVERRIDE THIS IF YOU NEED TO RESHARD SERVER BY SERVER
|
||||
return resharder.reshard(result);
|
||||
}, resharder.checkInterval);
|
||||
// MULTI-SERVER BOTS OVERRIDE THIS IF YOU NEED TO RESHARD SERVER BY SERVER
|
||||
return resharder.reshard(result);
|
||||
}, resharder.checkInterval);
|
||||
}
|
||||
|
||||
export async function reshard(resharder: Resharder, gatewayBot: GatewayBot) {
|
||||
// oldGateway.debug("GW DEBUG", "[Resharding] Starting the reshard process.");
|
||||
// oldGateway.debug("GW DEBUG", "[Resharding] Starting the reshard process.");
|
||||
|
||||
// Create a temporary gateway manager for easier handling.
|
||||
const tmpManager = createGatewayManager({
|
||||
gatewayBot: gatewayBot,
|
||||
gatewayConfig: resharder.gateway.manager.gatewayConfig,
|
||||
handleDiscordPayload: () => {},
|
||||
tellWorkerToIdentify: resharder.tellWorkerToPrepare,
|
||||
});
|
||||
// Create a temporary gateway manager for easier handling.
|
||||
const tmpManager = createGatewayManager({
|
||||
gatewayBot: gatewayBot,
|
||||
gatewayConfig: resharder.gateway.manager.gatewayConfig,
|
||||
handleDiscordPayload: () => {},
|
||||
tellWorkerToIdentify: resharder.tellWorkerToPrepare,
|
||||
});
|
||||
|
||||
// Begin resharding
|
||||
// Begin resharding
|
||||
|
||||
// If more than 100K servers, begin switching to 16x sharding
|
||||
if (resharder.useOptimalLargeBotSharding) {
|
||||
// gateway.debug("GW DEBUG", "[Resharding] Using optimal large bot sharding solution.");
|
||||
tmpManager.manager.totalShards = resharder.gateway.calculateTotalShards(resharder.gateway);
|
||||
}
|
||||
// If more than 100K servers, begin switching to 16x sharding
|
||||
if (resharder.useOptimalLargeBotSharding) {
|
||||
// gateway.debug("GW DEBUG", "[Resharding] Using optimal large bot sharding solution.");
|
||||
tmpManager.manager.totalShards = resharder.gateway.calculateTotalShards(resharder.gateway);
|
||||
}
|
||||
|
||||
tmpManager.spawnShards(tmpManager);
|
||||
tmpManager.spawnShards(tmpManager);
|
||||
|
||||
return new Promise((resolve) => {
|
||||
// TIMER TO KEEP CHECKING WHEN ALL SHARDS HAVE RESHARDED
|
||||
const timer = setInterval(async () => {
|
||||
const pending = await gateway.resharding.isPending(gateway, oldGateway);
|
||||
// STILL PENDING ON SOME SHARDS TO BE CREATED
|
||||
if (pending) return;
|
||||
return new Promise((resolve) => {
|
||||
// TIMER TO KEEP CHECKING WHEN ALL SHARDS HAVE RESHARDED
|
||||
const timer = setInterval(async () => {
|
||||
const pending = await gateway.resharding.isPending(gateway, oldGateway);
|
||||
// STILL PENDING ON SOME SHARDS TO BE CREATED
|
||||
if (pending) return;
|
||||
|
||||
// ENABLE EVENTS ON NEW SHARDS AND IGNORE EVENTS ON OLD
|
||||
const oldHandler = oldGateway.handleDiscordPayload;
|
||||
gateway.handleDiscordPayload = oldHandler;
|
||||
oldGateway.handleDiscordPayload = function (og, data, shardId) {
|
||||
// ALLOW EXCEPTION FOR CHUNKING TO PREVENT REQUESTS FREEZING
|
||||
if (data.t !== "GUILD_MEMBERS_CHUNK") return;
|
||||
oldHandler(og, data, shardId);
|
||||
};
|
||||
// ENABLE EVENTS ON NEW SHARDS AND IGNORE EVENTS ON OLD
|
||||
const oldHandler = oldGateway.handleDiscordPayload;
|
||||
gateway.handleDiscordPayload = oldHandler;
|
||||
oldGateway.handleDiscordPayload = function (og, data, shardId) {
|
||||
// ALLOW EXCEPTION FOR CHUNKING TO PREVENT REQUESTS FREEZING
|
||||
if (data.t !== "GUILD_MEMBERS_CHUNK") return;
|
||||
oldHandler(og, data, shardId);
|
||||
};
|
||||
|
||||
// STOP TIMER
|
||||
clearInterval(timer);
|
||||
await gateway.resharding.editGuildShardIds();
|
||||
await gateway.resharding.closeOldShards(oldGateway);
|
||||
gateway.debug("GW DEBUG", "[Resharding] Complete.");
|
||||
resolve(gateway);
|
||||
}, 30000);
|
||||
}) as Promise<GatewayManager>;
|
||||
// STOP TIMER
|
||||
clearInterval(timer);
|
||||
await gateway.resharding.editGuildShardIds();
|
||||
await gateway.resharding.closeOldShards(oldGateway);
|
||||
gateway.debug("GW DEBUG", "[Resharding] Complete.");
|
||||
resolve(gateway);
|
||||
}, 30000);
|
||||
}) as Promise<GatewayManager>;
|
||||
}
|
||||
|
||||
// /** The handler to automatically reshard when necessary. */

@ -269,41 +269,41 @@ export async function reshard(resharder: Resharder, gatewayBot: GatewayBot) {

/** Handler that by default will check that all new shards are online in the new gateway. The handler can be overridden if you have multiple servers and want to communicate through redis pubsub or whatever you prefer. */
export async function resharderIsPending(
    gateway: GatewayManager,
    oldGateway: GatewayManager,
) {
    for (let i = gateway.firstShardId; i < gateway.lastShardId; i++) {
        const shard = gateway.shards.get(i);
        if (!shard?.ready) {
            return true;
        }
    }

    return false;
}
/** Handler that by default closes all shards in the old gateway. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
export async function resharderCloseOldShards(oldGateway: GatewayManager) {
    // SHUT DOWN ALL SHARDS IF NOTHING IN QUEUE
    oldGateway.shards.forEach((shard) => {
        // CLOSE THIS SHARD, IT HAS NO QUEUE
        if (!shard.processingQueue && !shard.queue.length) {
            return oldGateway.closeWS(
                shard.ws,
                3066,
                "Shard has been resharded. Closing shard since it has no queue.",
            );
        }

        // IF QUEUE EXISTS GIVE IT 5 MINUTES TO COMPLETE
        setTimeout(() => {
            oldGateway.closeWS(
                shard.ws,
                3066,
                "Shard has been resharded. Delayed closing shard since it had a queue.",
            );
        }, 300000);
    });
}
// /** Handler that by default will check to see if resharding should occur. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */

@ -330,10 +330,10 @@ export async function resharderCloseOldShards(oldGateway: GatewayManager) {

/** Handler that by default will save the new shard id for each guild that becomes ready in the new gateway. This can be overridden to save the shard ids in a redis cache layer or whatever you prefer. These ids will be used later to update all guilds. */
export async function markNewGuildShardId(guildIds: bigint[], shardId: number) {
    // PLACEHOLDER TO LET YOU MARK A GUILD ID AND SHARD ID FOR LATER USE ONCE RESHARDED
}

/** Handler that by default does not do anything since by default the library will not cache. */
export async function reshardingEditGuildShardIds() {
    // PLACEHOLDER TO LET YOU UPDATE CACHED GUILDS
}
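The doc comments above repeatedly suggest overriding these resharding handlers when shards are spread over several processes or servers. A minimal sketch of one such override, assuming a hypothetical redis-like client and that the handler is swapped in for `resharding.isPending`; the import path mirrors the ones used in this folder and may differ:

// Sketch only: `redis` and the key naming scheme are assumptions, not part of the library.
import { GatewayManager } from "./gatewayManager.ts";

declare const redis: {
    get(key: string): Promise<string | null>;
};

export async function isPendingAcrossServers(gateway: GatewayManager, _oldGateway: GatewayManager) {
    // Each worker process marks its shards ready under `resharding:ready:<id>`;
    // resharding stays pending until every shard id in the new range is marked.
    for (let i = gateway.firstShardId; i < gateway.lastShardId; i++) {
        if (!(await redis.get(`resharding:ready:${i}`))) return true;
    }
    return false;
}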
178
vendor/gateway/manager/shardManager.ts
vendored

@ -17,106 +17,106 @@ export type ShardManager = ReturnType<typeof createShardManager>;
 * The aim of this is to provide an easy to use manager which can be used by workers or any other kind of separate process.
 */
export function createShardManager(options: CreateShardManager) {
    return {
        // ----------
        // PROPERTIES
        // ----------

        /** Options which are used to create a new Shard. */
        createShardOptions: {
            ...options.createShardOptions,
            events: {
                ...options.createShardOptions?.events,
                message: options.createShardOptions?.events?.message ?? options.handleMessage,
            },
        },
        /** Gateway configuration which is used when creating a Shard. */
        gatewayConfig: options.gatewayConfig,
        /** Managed Shards. */
        shards: new Collection(
            options.shardIds.map((shardId) => {
                const shard = createShard({
                    ...options.createShardOptions,
                    id: shardId,
                    totalShards: options.totalShards,
                    gatewayConfig: options.gatewayConfig,
                    requestIdentify: async function () {
                        return await options.requestIdentify(shardId);
                    },
                });

                return [shardId, shard] as const;
            }),
        ),
        /** Total amount of Shards used by the bot. */
        totalShards: options.totalShards,

        // ----------
        // METHODS
        // ----------

        /** Tell the manager to identify a Shard.
         * If this Shard is not already managed this will also add the Shard to the manager.
         */
        identify: async function (shardId: number) {
            let shard = this.shards.get(shardId);
            if (!shard) {
                shard = createShard({
                    ...this.createShardOptions,
                    id: shardId,
                    totalShards: this.totalShards,
                    gatewayConfig: this.gatewayConfig,
                    requestIdentify: async function () {
                        return await options.requestIdentify(shardId);
                    },
                });

                this.shards.set(shardId, shard);
            }

            return await shard.identify();
        },

        /** Kill a shard.
         * Close a shard's connection to Discord's gateway (if any) and remove it from the manager.
         */
        kill: async function (shardId: number) {
            const shard = this.shards.get(shardId);
            if (!shard) return;

            this.shards.delete(shardId);
            return await shard.shutdown();
        },

        /** This function communicates with the parent manager,
         * in order to know whether this manager is allowed to identify a new shard.
         */
        requestIdentify: options.requestIdentify,
    };
}

export interface CreateShardManager {
    // ----------
    // PROPERTIES
    // ----------

    /** Options which are used to create a new Shard. */
    createShardOptions?: Omit<CreateShard, "id" | "totalShards" | "requestIdentify" | "gatewayConfig">;
    /** Gateway configuration which is used when creating a Shard. */
    gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
    /** Ids of the Shards which should be managed. */
    shardIds: number[];
    /** Total amount of Shards used by the bot. */
    totalShards: number;

    // ----------
    // METHODS
    // ----------

    /** This function is used when a shard receives any message from Discord. */
    handleMessage(shard: Shard, message: DiscordGatewayPayload): unknown;

    /** This function communicates with the parent manager,
     * in order to know whether this manager is allowed to identify a new shard.
     */
    requestIdentify(shardId: number): Promise<void>;
}
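Taken together, createShardManager only needs the four required CreateShardManager fields plus the two callbacks. A minimal usage sketch, assuming a standalone worker; the token is a placeholder and the immediately-resolving requestIdentify is only acceptable for a tiny bot, since a parent manager normally gates identifies here:

import { createShardManager } from "./shardManager.ts";

const manager = createShardManager({
    gatewayConfig: { token: "BOT_TOKEN_HERE" }, // placeholder token
    shardIds: [0, 1],
    totalShards: 2,
    handleMessage: (shard, message) => {
        // Raw gateway payloads end up here; forward them to your event pipeline.
        console.log(shard.id, message.t);
    },
    // Resolve immediately: fine for a sketch, not for real identify rate limits.
    requestIdentify: async (_shardId) => {},
});

// Identify a single managed shard.
await manager.identify(0);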
38
vendor/gateway/manager/spawnShards.ts
vendored

@ -6,27 +6,27 @@ import { createGatewayManager, GatewayManager } from "./gatewayManager.ts";
/** Begin spawning shards. */
export function spawnShards(gateway: GatewayManager) {
    // PREPARES THE MAX SHARD COUNT BY CONCURRENCY
    // if (manager.resharding.useOptimalLargeBotSharding) {
    //     // gateway.debug("GW DEBUG", "[Spawning] Using optimal large bot sharding solution.");
    //     manager.manager.totalShards = manager.calculateTotalShards(
    //         manager,
    //     );
    // }

    // PREPARES ALL SHARDS IN SPECIFIC BUCKETS
    gateway.prepareBuckets();

    // SPREAD THIS OUT TO DIFFERENT WORKERS TO BEGIN STARTING UP
    gateway.buckets.forEach(async (bucket, bucketId) => {
        // gateway.debug("GW DEBUG", `2. Running forEach loop in spawnShards function.`);

        for (const worker of bucket.workers) {
            // gateway.debug("GW DEBUG", `3. Running for of loop in spawnShards function.`);

            for (const shardId of worker.queue) {
                await gateway.tellWorkerToIdentify(worker.id, shardId, bucketId);
            }
        }
    });
}
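The bucket loop above relies on Discord's identify concurrency rule: only shards whose `shard_id % max_concurrency` share a value may identify in the same rate limit window. A standalone sketch of that grouping, independent of the library's own prepareBuckets implementation:

// Sketch of the bucket idea spawnShards relies on (not the library's prepareBuckets).
function groupShardsIntoBuckets(shardIds: number[], maxConcurrency: number): Map<number, number[]> {
    const buckets = new Map<number, number[]>();
    for (const shardId of shardIds) {
        const bucketId = shardId % maxConcurrency;
        const bucket = buckets.get(bucketId) ?? [];
        bucket.push(shardId);
        buckets.set(bucketId, bucket);
    }
    return buckets;
}

// Example: 6 shards with max_concurrency 2 -> bucket 0: [0, 2, 4], bucket 1: [1, 3, 5].
groupShardsIntoBuckets([0, 1, 2, 3, 4, 5], 2);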
4
vendor/gateway/manager/stop.ts
vendored

@ -2,7 +2,7 @@ import { delay } from "../../util/delay.ts";
import { GatewayManager } from "./gatewayManager.ts";

export async function stop(gateway: GatewayManager, code: number, reason: string) {
    gateway.manager.shards.forEach((shard) => shard.close(code, reason));

    await delay(5000);
}
10
vendor/gateway/manager/tellWorkerToIdentify.ts
vendored

@ -4,10 +4,10 @@ import { GatewayManager } from "./gatewayManager.ts";
/** Allows users to hook in and change how to communicate with different workers across different servers or anything they like. For example using redis pubsub to talk to other servers. */
export async function tellWorkerToIdentify(
    gateway: GatewayManager,
    _workerId: number,
    shardId: number,
    _bucketId: number,
): Promise<void> {
    return await gateway.manager.identify(shardId);
}
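The default above simply identifies the shard in the local manager. A hedged sketch of the cross-process variant the doc comment hints at, assuming a hypothetical publish function for whatever transport (redis pubsub, IPC, HTTP) the workers share:

import { GatewayManager } from "./gatewayManager.ts";

// Hypothetical transport; the only contract is that the promise resolves once
// the target worker has been told to identify the shard.
declare function publishToWorker(workerId: number, payload: unknown): Promise<void>;

export async function tellWorkerToIdentifyOverPubsub(
    _gateway: GatewayManager,
    workerId: number,
    shardId: number,
    bucketId: number,
): Promise<void> {
    await publishToWorker(workerId, { type: "IDENTIFY_SHARD", shardId, bucketId });
}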
@ -1,9 +1,9 @@

import { Shard } from "./types.ts";

export function calculateSafeRequests(shard: Shard) {
    // * 2 adds an extra safety layer for Discord's OP 1 requests that we need to respond to
    const safeRequests = shard.maxRequestsPerRateLimitTick -
        Math.ceil(shard.rateLimitResetInterval / shard.heart.interval) * 2;

    return safeRequests < 0 ? 0 : safeRequests;
}
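A worked example makes the formula concrete. Assuming the usual gateway limit of 120 commands per 60 000 ms tick and a heartbeat interval of 41 250 ms taken from a typical HELLO payload (the heartbeat value is an example, not a constant):

// Worked example with assumed values; the real ones come from the shard's config and HELLO.
const maxRequestsPerRateLimitTick = 120;
const rateLimitResetInterval = 60_000;
const heartbeatInterval = 41_250;

// Up to Math.ceil(60000 / 41250) = 2 heartbeats can fall into one tick; doubled for OP 1 replies.
const reserved = Math.ceil(rateLimitResetInterval / heartbeatInterval) * 2; // 4
const safeRequests = maxRequestsPerRateLimitTick - reserved; // 116
console.log(safeRequests);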
4
vendor/gateway/shard/close.ts
vendored

@ -1,7 +1,7 @@
import { Shard } from "./types.ts";

export function close(shard: Shard, code: number, reason: string): void {
    if (shard.socket?.readyState !== WebSocket.OPEN) return;

    return shard.socket?.close(code, reason);
}
48
vendor/gateway/shard/connect.ts
vendored

@ -1,34 +1,34 @@
import { Shard, ShardState } from "./types.ts";

export async function connect(shard: Shard): Promise<void> {
    // Only set the shard to `Connecting` state,
    // if the connection request does not come from an identify or resume action.
    if (![ShardState.Identifying, ShardState.Resuming].includes(shard.state)) {
        shard.state = ShardState.Connecting;
    }
    shard.events.connecting?.(shard);

    // Explicitly setting the encoding to json, since we do not support ETF.
    const socket = new WebSocket(`${shard.gatewayConfig.url}/?v=${shard.gatewayConfig.version}&encoding=json`);
    shard.socket = socket;

    // TODO: proper event handling
    socket.onerror = (event) => console.log({ error: event });

    socket.onclose = (event) => shard.handleClose(event);

    socket.onmessage = (message) => shard.handleMessage(message);

    return new Promise((resolve) => {
        socket.onopen = () => {
            // Only set the shard to `Unidentified` state,
            // if the connection request does not come from an identify or resume action.
            if (![ShardState.Identifying, ShardState.Resuming].includes(shard.state)) {
                shard.state = ShardState.Unidentified;
            }
            shard.events.connected?.(shard);

            resolve();
        };
    });
}
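Since connect() only opens the socket and never identifies, the connecting/connected events are a convenient place to observe that lifecycle. A small sketch of event handlers that could be passed to a shard for this purpose; the logging is illustrative:

import { ShardEvents } from "./types.ts";

// Minimal sketch, assuming these handlers are supplied via CreateShard.events.
export const connectionLogging: ShardEvents = {
    connecting: (shard) => console.log(`shard ${shard.id}: opening socket`),
    connected: (shard) => console.log(`shard ${shard.id}: socket open, not yet identified`),
};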
528
vendor/gateway/shard/createShard.ts
vendored

@ -1,16 +1,16 @@
import { identify } from "./identify.ts";
import { handleMessage } from "./handleMessage.ts";
import {
    DEFAULT_HEARTBEAT_INTERVAL,
    GATEWAY_RATE_LIMIT_RESET_INTERVAL,
    MAX_GATEWAY_REQUESTS_PER_INTERVAL,
    Shard,
    ShardEvents,
    ShardGatewayConfig,
    ShardHeart,
    ShardSocketCloseCodes,
    ShardSocketRequest,
    ShardState,
} from "./types.ts";
import { startHeartbeating } from "./startHeartbeating.ts";
import { stopHeartbeating } from "./stopHeartbeating.ts";

@ -33,301 +33,301 @@ import { API_VERSION } from "../../util/constants.ts";

/** */
export function createShard(
    options: CreateShard,
) {
    // This is done for performance reasons
    const calculateSafeRequestsOverwritten = options.calculateSafeRequests ?? calculateSafeRequests;
    const closeOverwritten = options.close ?? close;
    const connectOverwritten = options.connect ?? connect;
    const identifyOverwritten = options.identify ?? identify;
    const sendOverwritten = options.send ?? send;
    const shutdownOverwritten = options.shutdown ?? shutdown;
    const resumeOverwritten = options.resume ?? resume;
    const handleCloseOverwritten = options.handleClose ?? handleClose;
    const handleMessageOverwritten = options.handleMessage ?? handleMessage;
    const isOpenOverwritten = options.isOpen ?? isOpen;
    const startHeartbeatingOverwritten = options.startHeartbeating ?? startHeartbeating;
    const stopHeartbeatingOverwritten = options.stopHeartbeating ?? stopHeartbeating;

    return {
        // ----------
        // PROPERTIES
        // ----------

        /** The gateway configuration which is used to connect to Discord. */
        gatewayConfig: {
            compress: options.gatewayConfig.compress ?? false,
            intents: options.gatewayConfig.intents ?? 0,
            properties: {
                os: options.gatewayConfig?.properties?.os ?? Deno.build.os,
                browser: options.gatewayConfig?.properties?.browser ?? "Discordeno",
                device: options.gatewayConfig?.properties?.device ?? "Discordeno",
            },
            token: options.gatewayConfig.token,
            url: options.gatewayConfig.url ?? "wss://gateway.discord.gg",
            version: options.gatewayConfig.version ?? API_VERSION,
        } as ShardGatewayConfig,
        /** This contains all the heartbeat information */
        heart: {
            acknowledged: false,
            interval: DEFAULT_HEARTBEAT_INTERVAL,
        } as ShardHeart,
        /** Id of the shard. */
        id: options.id,
        /** The maximum number of requests which can be sent to Discord per rate limit tick.
         * Typically this value should not be changed.
         */
        maxRequestsPerRateLimitTick: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
        /** The previous payload sequence number. */
        previousSequenceNumber: options.previousSequenceNumber || null,
        /** In which interval (in milliseconds) the gateway resets its rate limit. */
        rateLimitResetInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
        /** Current session id of the shard if present. */
        sessionId: undefined as string | undefined,
        /** This contains the WebSocket connection to Discord, if currently connected. */
        socket: undefined as WebSocket | undefined,
        /** Current internal state of the shard. */
        state: ShardState.Offline,
        /** The total amount of shards which are used to communicate with Discord. */
        totalShards: options.totalShards,

        // ----------
        // METHODS
        // ----------

        /** The shard related event handlers. */
        events: options.events ?? {} as ShardEvents,

        /** Calculate the amount of requests which can safely be made per rate limit interval,
         * before the gateway gets disconnected due to an exceeded rate limit.
         */
        calculateSafeRequests: function () {
            return calculateSafeRequestsOverwritten(this);
        },

        /** Close the socket connection to Discord if present. */
        close: function (code: number, reason: string) {
            return closeOverwritten(this, code, reason);
        },

        /** Connect the shard with the gateway and start heartbeating.
         * This will not identify the shard to the gateway.
         */
        connect: async function () {
            return await connectOverwritten(this);
        },

        /** Identify the shard to the gateway.
         * If not connected, this will also connect the shard to the gateway.
         */
        identify: async function () {
            return await identifyOverwritten(this);
        },

        /** Check whether the connection to Discord is currently open. */
        isOpen: function () {
            return isOpenOverwritten(this);
        },

        /** Function which can be overwritten in order to get the shard's presence. */
        // This function is allowed to be async, in case the devs create the presence based on e.g. database values.
        // Passing the shard's id there to make it easier for the dev to use this function.
        makePresence: options.makePresence,

        /** Attempt to resume the shard's previous session with the gateway. */
        resume: async function () {
            return await resumeOverwritten(this);
        },

        /** Send a message to Discord.
         * @param {boolean} [highPriority=false] - Whether this message should be sent asap.
         */
        send: async function (message: ShardSocketRequest, highPriority: boolean = false) {
            return await sendOverwritten(this, message, highPriority);
        },

        /** Shutdown the shard.
         * Forcefully disconnect the shard from Discord.
         * The shard may not attempt to reconnect with Discord.
         */
        shutdown: async function () {
            return await shutdownOverwritten(this);
        },

        /** @private Internal shard bucket.
         * Only access this if you know what you are doing.
         *
         * Bucket for handling shard request rate limits.
         */
        bucket: createLeakyBucket({
            max: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
            refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
            refillAmount: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
        }),

        /** @private Internal shard function.
         * Only use this function if you know what you are doing.
         *
         * Handle a gateway connection close.
         */
        handleClose: async function (close: CloseEvent) {
            return await handleCloseOverwritten(this, close);
        },

        /** @private Internal shard function.
         * Only use this function if you know what you are doing.
         *
         * Handle an incoming gateway message.
         */
        handleMessage: async function (message: MessageEvent<any>) {
            return await handleMessageOverwritten(this, message);
        },

        /** This function communicates with the management process, in order to know whether it's free to identify. */
        requestIdentify: async function () {
            return await options.requestIdentify(this.id);
        },

        /** @private Internal state.
         * Only use this if you know what you are doing.
         *
         * Cache for pending gateway requests which should have been sent while the gateway went offline.
         */
        offlineSendQueue: [] as ((_?: unknown) => void)[],

        /** @private Internal shard map.
         * Only use this map if you know what you are doing.
         *
         * This is used to resolve internal waiting states.
         * Mapped by SelectedEvents => ResolveFunction
         */
        resolves: new Map<"READY" | "RESUMED" | "INVALID_SESSION", (payload: DiscordGatewayPayload) => void>(),

        /** @private Internal shard function.
         * Only use this function if you know what you are doing.
         *
         * Start sending heartbeat payloads to Discord in the provided interval.
         */
        startHeartbeating: function (interval: number) {
            return startHeartbeatingOverwritten(this, interval);
        },

        /** @private Internal shard function.
         * Only use this function if you know what you are doing.
         *
         * Stop the heartbeating process with Discord.
         */
        stopHeartbeating: function () {
            return stopHeartbeatingOverwritten(this);
        },
    };
}

export interface CreateShard {
    /** Id of the shard which should be created. */
    id: number;

    /** Gateway configuration for the shard. */
    gatewayConfig: PickPartial<ShardGatewayConfig, "token">;

    /** The total amount of shards which are used to communicate with Discord. */
    totalShards: number;

    /** This function communicates with the management process, in order to know whether it's free to identify.
     * When this function resolves, this means that the shard is allowed to send an identify payload to Discord.
     */
    requestIdentify: (shardId: number) => Promise<void>;

    /** Calculate the amount of requests which can safely be made per rate limit interval,
     * before the gateway gets disconnected due to an exceeded rate limit.
     */
    calculateSafeRequests?: typeof calculateSafeRequests;

    /** Close the socket connection to Discord if present. */
    close?: typeof close;

    /** Connect the shard with the gateway and start heartbeating.
     * This will not identify the shard to the gateway.
     */
    connect?: typeof connect;

    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Handle a gateway connection close.
     */
    handleClose?: typeof handleClose;

    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Handle an incoming gateway message.
     */
    handleMessage?: typeof handleMessage;

    /** Identify the shard to the gateway.
     * If not connected, this will also connect the shard to the gateway.
     */
    identify?: typeof identify;

    /** Check whether the connection to Discord is currently open. */
    isOpen?: typeof isOpen;

    /** Function which can be overwritten in order to get the shard's presence. */
    makePresence?(shardId: number): Promise<DiscordStatusUpdate> | DiscordStatusUpdate;

    /** The maximum number of requests which can be sent to Discord per rate limit tick.
     * Typically this value should not be changed.
     */
    maxRequestsPerRateLimitTick?: number;

    /** The previous payload sequence number. */
    previousSequenceNumber?: number;

    /** In which interval (in milliseconds) the gateway resets its rate limit. */
    rateLimitResetInterval?: number;

    /** Attempt to resume the shard's previous session with the gateway. */
    resume?: typeof resume;

    /** Send a message to Discord.
     * @param {boolean} [highPriority=false] - Whether this message should be sent asap.
     */
    send?: typeof send;

    /** Shutdown the shard.
     * Forcefully disconnect the shard from Discord.
     * The shard may not attempt to reconnect with Discord.
     */
    shutdown?: typeof shutdown;

    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Start sending heartbeat payloads to Discord in the provided interval.
     */
    startHeartbeating?: typeof startHeartbeating;

    /** Current internal state of the shard. */
    state?: ShardState;

    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Stop the heartbeating process with Discord.
     */
    stopHeartbeating?: typeof stopHeartbeating;

    /** The shard related event handlers. */
    events?: ShardEvents;
    /** This contains all the heartbeat information */
    heart?: ShardHeart;
    /** Bucket for handling shard request rate limits. */
    bucket?: LeakyBucket;
    /** Cache for pending gateway requests which should have been sent while the gateway went offline. */
    offlineSendQueue?: ShardSocketRequest[];
    /** This is used to resolve internal waiting states.
     * Mapped by SelectedEvents => ResolveFunction
     */
    resolves?: Shard["resolves"];
}
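Only id, gatewayConfig, totalShards and requestIdentify are required in CreateShard; everything else above falls back to the defaults. A minimal sketch of using it directly, with a placeholder token and an immediately-resolving requestIdentify that is only reasonable for a single shard:

import { createShard } from "./createShard.ts";

const shard = createShard({
    id: 0,
    totalShards: 1,
    gatewayConfig: { token: "BOT_TOKEN_HERE" }, // placeholder token; intents default to 0
    // A manager normally gates this behind the identify rate limit.
    requestIdentify: async (_shardId) => {},
    events: {
        message: (_shard, payload) => console.log("gateway payload:", payload.t),
    },
});

// identify() connects first if needed, then sends the Identify payload.
await shard.identify();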
108
vendor/gateway/shard/handleClose.ts
vendored

@ -2,62 +2,62 @@ import { GatewayCloseEventCodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";

export async function handleClose(shard: Shard, close: CloseEvent): Promise<void> {
    // gateway.debug("GW CLOSED", { shardId, payload: event });

    shard.stopHeartbeating();

    switch (close.code) {
        case ShardSocketCloseCodes.TestingFinished: {
            shard.state = ShardState.Offline;
            shard.events.disconnected?.(shard);

            return;
        }
        // On these codes a manual start will be done.
        case ShardSocketCloseCodes.Shutdown:
        case ShardSocketCloseCodes.ReIdentifying:
        case ShardSocketCloseCodes.Resharded:
        case ShardSocketCloseCodes.ResumeClosingOldConnection:
        case ShardSocketCloseCodes.ZombiedConnection: {
            shard.state = ShardState.Disconnected;
            shard.events.disconnected?.(shard);

            // gateway.debug("GW CLOSED_RECONNECT", { shardId, payload: event });
            return;
        }
        // Gateway connection closes which require a new identify.
        case GatewayCloseEventCodes.UnknownOpcode:
        case GatewayCloseEventCodes.NotAuthenticated:
        case GatewayCloseEventCodes.InvalidSeq:
        case GatewayCloseEventCodes.RateLimited:
        case GatewayCloseEventCodes.SessionTimedOut: {
            shard.state = ShardState.Identifying;
            shard.events.disconnected?.(shard);

            return await shard.identify();
        }
        // When these codes are received something went really wrong.
        // On those we cannot start a reconnect attempt.
        case GatewayCloseEventCodes.AuthenticationFailed:
        case GatewayCloseEventCodes.InvalidShard:
        case GatewayCloseEventCodes.ShardingRequired:
        case GatewayCloseEventCodes.InvalidApiVersion:
        case GatewayCloseEventCodes.InvalidIntents:
        case GatewayCloseEventCodes.DisallowedIntents: {
            shard.state = ShardState.Offline;
            shard.events.disconnected?.(shard);

            throw new Error(close.reason || "Discord gave no reason! GG! You broke Discord!");
        }
        // Gateway connection closes on which a resume is allowed.
        case GatewayCloseEventCodes.UnknownError:
        case GatewayCloseEventCodes.DecodeError:
        case GatewayCloseEventCodes.AlreadyAuthenticated:
        default: {
            shard.state = ShardState.Resuming;
            shard.events.disconnected?.(shard);

            return await shard.resume();
        }
    }
}
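Because handleClose is overridable through CreateShard.handleClose, a custom close handler can wrap the default one, for example to add a small backoff before reconnect logic runs. A sketch under that assumption; the delay length is arbitrary:

import { handleClose } from "./handleClose.ts";
import { Shard } from "./types.ts";

// Sketch of a CreateShard.handleClose override: wait briefly, then defer to the default logic.
export async function handleCloseWithBackoff(shard: Shard, close: CloseEvent): Promise<void> {
    await new Promise((resolve) => setTimeout(resolve, 2_000));
    return await handleClose(shard, close);
}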
264
vendor/gateway/shard/handleMessage.ts
vendored

@ -8,149 +8,149 @@ import { GATEWAY_RATE_LIMIT_RESET_INTERVAL, Shard, ShardState } from "./types.ts
const decoder = new TextDecoder();

export async function handleMessage(shard: Shard, message: MessageEvent<any>): Promise<void> {
    message = message.data;

    // If message compression is enabled,
    // Discord might send zlib compressed payloads.
    if (shard.gatewayConfig.compress && message instanceof Blob) {
        message = decompressWith(
            new Uint8Array(await message.arrayBuffer()),
            0,
            (slice: Uint8Array) => decoder.decode(slice),
        );
    }

    // Safeguard in case decompression failed to make a string.
    if (typeof message !== "string") return;

    const messageData = JSON.parse(message) as DiscordGatewayPayload;
    // gateway.debug("GW RAW", { shardId, payload: messageData });

    // TODO: remove
    // console.log({ messageData: censor(messageData) });

    switch (messageData.op) {
        case GatewayOpcodes.Heartbeat: {
            // TODO: can this actually happen
            if (!shard.isOpen()) return;

            shard.heart.lastBeat = Date.now();
            // Discord randomly sends this requiring an immediate heartbeat back.
            // Using a direct socket.send call here because heartbeat requests are reserved by us.
            shard.socket?.send(
                JSON.stringify({
                    op: GatewayOpcodes.Heartbeat,
                    d: shard.previousSequenceNumber,
                }),
            );
            shard.events.heartbeat?.(shard);

            break;
        }
        case GatewayOpcodes.Hello: {
            const interval = (messageData.d as DiscordHello).heartbeat_interval;

            shard.startHeartbeating(interval);

            if (shard.state !== ShardState.Resuming) {
                // HELLO has been sent on a non resume action.
                // This means that the shard starts a new session,
                // therefore the rate limit interval has been reset too.
                shard.bucket = createLeakyBucket({
                    max: shard.calculateSafeRequests(),
                    refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
                    refillAmount: shard.calculateSafeRequests(),
                    // Waiting acquires should not be lost on a re-identify.
                    waiting: shard.bucket.waiting,
                });
            }

            shard.events.hello?.(shard);

            break;
        }
        case GatewayOpcodes.HeartbeatACK: {
            shard.heart.acknowledged = true;
            shard.heart.lastAck = Date.now();
            // Manually calculating the round trip time for users who need it.
            if (shard.heart.lastBeat) {
                shard.heart.rtt = shard.heart.lastAck - shard.heart.lastBeat;
            }

            shard.events.heartbeatAck?.(shard);

            break;
        }
        case GatewayOpcodes.Reconnect: {
            // gateway.debug("GW RECONNECT", { shardId });

            shard.events.requestedReconnect?.(shard);

            await shard.resume();

            break;
        }
        case GatewayOpcodes.InvalidSession: {
            // gateway.debug("GW INVALID_SESSION", { shardId, payload: messageData });
            const resumable = messageData.d as boolean;

            shard.events.invalidSession?.(shard, resumable);

            // We need to wait for a random amount of time between 1 and 5
            // Reference: https://discord.com/developers/docs/topics/gateway#resuming
            await delay(Math.floor((Math.random() * 4 + 1) * 1000));

            shard.resolves.get("INVALID_SESSION")?.(messageData);
            shard.resolves.delete("INVALID_SESSION");

            // When resumable is false we need to re-identify
            if (!resumable) {
                await shard.identify();

                break;
            }

            // The session is invalid but apparently it is resumable
            await shard.resume();

            break;
        }
    }

    if (messageData.t === "RESUMED") {
        // gateway.debug("GW RESUMED", { shardId });

        shard.state = ShardState.Connected;
        shard.events.resumed?.(shard);

        // Continue the requests which have been queued since the shard went offline.
        shard.offlineSendQueue.map((resolve) => resolve());

        shard.resolves.get("RESUMED")?.(messageData);
        shard.resolves.delete("RESUMED");
    } // Important for future resumes.
    else if (messageData.t === "READY") {
        const payload = messageData.d as DiscordReady;

        shard.sessionId = payload.session_id;
        shard.state = ShardState.Connected;

        // Continue the requests which have been queued since the shard went offline.
        // Important when this is a re-identify
        shard.offlineSendQueue.map((resolve) => resolve());

        shard.resolves.get("READY")?.(messageData);
        shard.resolves.delete("READY");
    }

    // Update the sequence number if it is present
    // `s` can be either `null` or a `number`.
    // In order to prevent update misses when `s` is `0` we check against null.
    if (messageData.s !== null) {
        shard.previousSequenceNumber = messageData.s;
    }

    // The necessary handling required for the Shard's connection has been finished.
    // Now the event can be safely forwarded.
    shard.events.message?.(shard, messageData);
}
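By the time the payload reaches events.message at the end of the function, sequence numbers and READY/RESUMED bookkeeping are already done, so a consumer only needs to care about dispatch events. A sketch of such a handler; the import path for DiscordGatewayPayload is an assumption, since the diff does not show where this shard file imports it from:

import { Shard } from "./types.ts";
import { DiscordGatewayPayload } from "../../types/discord.ts"; // path is an assumption

// Sketch of an events.message handler: react only to dispatch (op 0) payloads.
export function onShardMessage(shard: Shard, payload: DiscordGatewayPayload): void {
    if (payload.op !== 0) return; // only dispatch payloads carry a `t` event name
    switch (payload.t) {
        case "MESSAGE_CREATE":
            console.log(`shard ${shard.id} received a message event`);
            break;
        default:
            break;
    }
}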
78
vendor/gateway/shard/identify.ts
vendored

@ -2,49 +2,49 @@ import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
|
||||
|
||||
export async function identify(shard: Shard): Promise<void> {
|
||||
// A new identify has been requested even though there is already a connection open.
|
||||
// Therefore we need to close the old connection and heartbeating before creating a new one.
|
||||
if (shard.state === ShardState.Connected) {
|
||||
console.log("CLOSING EXISTING SHARD: #" + shard.id);
|
||||
shard.close(ShardSocketCloseCodes.ReIdentifying, "Re-identifying closure of old connection.");
|
||||
}
|
||||
// A new identify has been requested even though there is already a connection open.
|
||||
// Therefore we need to close the old connection and heartbeating before creating a new one.
|
||||
if (shard.state === ShardState.Connected) {
|
||||
console.log("CLOSING EXISTING SHARD: #" + shard.id);
|
||||
shard.close(ShardSocketCloseCodes.ReIdentifying, "Re-identifying closure of old connection.");
|
||||
}
|
||||
|
||||
shard.state = ShardState.Identifying;
|
||||
shard.events.identifying?.(shard);
|
||||
shard.state = ShardState.Identifying;
|
||||
shard.events.identifying?.(shard);
|
||||
|
||||
// It is possible that the shard is in Heartbeating state but not identified,
|
||||
// so check whether there is already a gateway connection existing.
|
||||
// If not we need to create one before we identify.
|
||||
if (!shard.isOpen()) {
|
||||
await shard.connect();
|
||||
}
|
||||
// It is possible that the shard is in Heartbeating state but not identified,
|
||||
// so check whether there is already a gateway connection existing.
|
||||
// If not we need to create one before we identify.
|
||||
if (!shard.isOpen()) {
|
||||
await shard.connect();
|
||||
}
|
||||
|
||||
// Wait until an identify is free for this shard.
|
||||
await shard.requestIdentify();
|
||||
// Wait until an identify is free for this shard.
|
||||
await shard.requestIdentify();
|
||||
|
||||
shard.send({
|
||||
op: GatewayOpcodes.Identify,
|
||||
d: {
|
||||
token: `Bot ${shard.gatewayConfig.token}`,
|
||||
compress: shard.gatewayConfig.compress,
|
||||
properties: shard.gatewayConfig.properties,
|
||||
intents: shard.gatewayConfig.intents,
|
||||
shard: [shard.id, shard.totalShards],
|
||||
presence: await shard.makePresence?.(shard.id),
|
||||
},
|
||||
}, true);
|
||||
shard.send({
|
||||
op: GatewayOpcodes.Identify,
|
||||
d: {
|
||||
token: `Bot ${shard.gatewayConfig.token}`,
|
||||
compress: shard.gatewayConfig.compress,
|
||||
properties: shard.gatewayConfig.properties,
|
||||
intents: shard.gatewayConfig.intents,
|
||||
shard: [shard.id, shard.totalShards],
|
||||
presence: await shard.makePresence?.(shard.id),
|
||||
},
|
||||
}, true);
|
||||
|
||||
return new Promise((resolve) => {
|
||||
shard.resolves.set("READY", () => {
|
||||
shard.events.identified?.(shard);
|
||||
resolve();
|
||||
return new Promise((resolve) => {
|
||||
shard.resolves.set("READY", () => {
|
||||
shard.events.identified?.(shard);
|
||||
resolve();
|
||||
});
|
||||
// When identifying too fast,
|
||||
// Discord sends an invalid session payload.
|
||||
// This can safely be ignored though and the shard starts a new identify action.
|
||||
shard.resolves.set("INVALID_SESSION", () => {
|
||||
shard.resolves.delete("READY");
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
// When identifying too fast,
|
||||
// Discord sends an invalid session payload.
|
||||
// This can safely be ignored though and the shard starts a new identify action.
|
||||
shard.resolves.set("INVALID_SESSION", () => {
|
||||
shard.resolves.delete("READY");
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
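A minimal sketch of the resolve-map pattern identify() relies on above; the names are illustrative, not the vendored API. Whichever of READY or INVALID_SESSION arrives first settles the pending promise.

type ResolveFn = (payload?: unknown) => void;
const resolves = new Map<"READY" | "INVALID_SESSION", ResolveFn>();

function waitForIdentify(): Promise<void> {
    return new Promise((resolve) => {
        resolves.set("READY", () => resolve());
        // An invalid session simply ends the wait; the caller may identify again.
        resolves.set("INVALID_SESSION", () => {
            resolves.delete("READY");
            resolve();
        });
    });
}

// When a matching payload arrives, the receive side settles the pending promise:
function settle(name: "READY" | "INVALID_SESSION", payload?: unknown): void {
    resolves.get(name)?.(payload);
    resolves.delete(name);
}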
2
vendor/gateway/shard/isOpen.ts
vendored
@@ -1,5 +1,5 @@
import { Shard } from "./types.ts";

export function isOpen(shard: Shard): boolean {
    return shard.socket?.readyState === WebSocket.OPEN;
}
73
vendor/gateway/shard/resume.ts
vendored
@@ -2,47 +2,50 @@ import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";

export async function resume(shard: Shard): Promise<void> {
    // gateway.debug("GW RESUMING", { shardId });
    // It has been requested to resume the Shards session.
    // It's possible that the shard is still connected with Discord's gateway, therefore we need to forcefully close it.
    if (shard.isOpen()) {
        shard.close(
            ShardSocketCloseCodes.ResumeClosingOldConnection,
            "Reconnecting the shard, closing old connection.",
        );
    }

    // Shard has never identified, so we cannot resume.
    if (!shard.sessionId) {
        // gateway.debug(
        //     "GW DEBUG",
        //     `[Error] Trying to resume a shard (id: ${shardId}) that was not first identified.`,
        // );

        return await shard.identify();

        // throw new Error(`[SHARD] Trying to resume a shard (id: ${shard.id}) which was never identified`);
    }

    shard.state = ShardState.Resuming;

    // Before we can resume, we need to create a new connection with Discord's gateway.
    await shard.connect();

    shard.send({
        op: GatewayOpcodes.Resume,
        d: {
            token: `Bot ${shard.gatewayConfig.token}`,
            session_id: shard.sessionId,
            seq: shard.previousSequenceNumber ?? 0,
        },
    }, true);

    return new Promise((resolve) => {
        shard.resolves.set("RESUMED", () => resolve());
        // If it is attempted to resume with an invalid session id,
        // Discord sends an invalid session payload.
        // Not erroring here since this can easily happen and it would not be catchable anyway.
        shard.resolves.set("INVALID_SESSION", () => {
            shard.resolves.delete("RESUMED");
            resolve();
        });
    });
}
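For reference, a hedged sketch of the Resume payload shape sent above (opcode 6 per the Discord gateway docs), with the sequence falling back to 0 when none has been recorded yet.

interface ResumePayload {
    op: 6;
    d: { token: string; session_id: string; seq: number };
}

function buildResume(token: string, sessionId: string, previousSeq: number | null): ResumePayload {
    return {
        op: 6,
        d: {
            token: `Bot ${token}`,
            session_id: sessionId,
            // Fall back to 0 if no sequence number has been seen yet.
            seq: previousSeq ?? 0,
        },
    };
}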
34
vendor/gateway/shard/send.ts
vendored
@@ -1,27 +1,27 @@
import { Shard, ShardSocketRequest } from "./types.ts";

async function checkOffline(shard: Shard, highPriority: boolean): Promise<void> {
    if (!shard.isOpen()) {
        await new Promise((resolve) => {
            if (highPriority) {
                // Higher priority requests get added at the beginning of the array.
                shard.offlineSendQueue.unshift(resolve);
            } else {
                shard.offlineSendQueue.push(resolve);
            }
        });
    }
}

export async function send(shard: Shard, message: ShardSocketRequest, highPriority: boolean): Promise<void> {
    // Before acquiring a token from the bucket, check whether the shard is currently offline or not.
    // Else bucket and token wait time just get wasted.
    await checkOffline(shard, highPriority);

    await shard.bucket.acquire(1, highPriority);

    // It's possible that the shard went offline after a token has been acquired from the bucket.
    await checkOffline(shard, highPriority);

    shard.socket?.send(JSON.stringify(message));
}
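A standalone sketch of the offline send queue idea used by checkOffline above (names are illustrative): callers park a resolver in an array and are released once the socket is open again, with high-priority sends jumping the line.

const offlineSendQueue: Array<() => void> = [];

function waitUntilOnline(isOpen: () => boolean, highPriority = false): Promise<void> {
    if (isOpen()) return Promise.resolve();
    return new Promise<void>((resolve) => {
        // Higher priority requests get released first when the connection returns.
        if (highPriority) offlineSendQueue.unshift(resolve);
        else offlineSendQueue.push(resolve);
    });
}

function releaseQueuedSends(): void {
    // Called once the socket is open again (e.g. on READY or RESUMED).
    offlineSendQueue.splice(0).forEach((resolve) => resolve());
}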
4
vendor/gateway/shard/shutdown.ts
vendored
@@ -1,6 +1,6 @@
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";

export async function shutdown(shard: Shard): Promise<void> {
    shard.close(ShardSocketCloseCodes.Shutdown, "Shard shutting down.");
    shard.state = ShardState.Offline;
}
94
vendor/gateway/shard/startHeartbeating.ts
vendored
@@ -2,63 +2,63 @@ import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";

export function startHeartbeating(shard: Shard, interval: number) {
    // gateway.debug("GW HEARTBEATING_STARTED", { shardId, interval });

    shard.heart.interval = interval;

    // Only set the shard's state to `Unidentified`
    // if heartbeating has not been started due to an identify or resume action.
    if ([ShardState.Disconnected, ShardState.Offline].includes(shard.state)) {
        shard.state = ShardState.Unidentified;
    }

    // The first heartbeat needs to be sent with a random delay between `0` and `interval`.
    // Using a `setTimeout(_, jitter)` here to accomplish that.
    // `Math.random()` can be `0` so we use `0.5` if this happens.
    // Reference: https://discord.com/developers/docs/topics/gateway#heartbeating
    const jitter = Math.ceil(shard.heart.interval * (Math.random() || 0.5));
    shard.heart.timeoutId = setTimeout(() => {
        // Using a direct socket.send call here because heartbeat requests are reserved by us.
        shard.socket?.send(JSON.stringify({
            op: GatewayOpcodes.Heartbeat,
            d: shard.previousSequenceNumber,
        }));

        shard.heart.lastBeat = Date.now();
        shard.heart.acknowledged = false;

        // After the random heartbeat jitter we can start a normal interval.
        shard.heart.intervalId = setInterval(async () => {
            // gateway.debug("GW DEBUG", `Running setInterval in heartbeat file. Shard: ${shardId}`);

            // gateway.debug("GW HEARTBEATING", { shardId, shard: currentShard });

            // The Shard did not receive a heartbeat ACK from Discord in time,
            // therefore we have to assume that the connection has failed or got "zombied".
            // The Shard needs to start a re-identify action accordingly.
            // Reference: https://discord.com/developers/docs/topics/gateway#heartbeating-example-gateway-heartbeat-ack
            if (!shard.heart.acknowledged) {
                shard.close(
                    ShardSocketCloseCodes.ZombiedConnection,
                    "Zombied connection, did not receive an heartbeat ACK in time.",
                );

                return await shard.identify();
            }

            shard.heart.acknowledged = false;

            // Using a direct socket.send call here because heartbeat requests are reserved by us.
            shard.socket?.send(
                JSON.stringify({
                    op: GatewayOpcodes.Heartbeat,
                    d: shard.previousSequenceNumber,
                }),
            );

            shard.heart.lastBeat = Date.now();

            shard.events.heartbeat?.(shard);
        }, shard.heart.interval);
    }, jitter);
}
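The jitter line above is easy to misread, so here is a hedged sketch of the same calculation in isolation: the first heartbeat is delayed by a random fraction of the interval, and `Math.random() || 0.5` guards against a zero delay.

function firstHeartbeatDelay(intervalMs: number): number {
    // Math.random() can return 0; substitute 0.5 so the jitter is never zero.
    const fraction = Math.random() || 0.5;
    return Math.ceil(intervalMs * fraction);
}

// Example: with Discord's typical 41250 ms heartbeat interval the first beat
// fires somewhere within roughly the first 41 seconds, then regular beats follow.
const jitter = firstHeartbeatDelay(41250);
console.log(`first heartbeat in ${jitter} ms`);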
10
vendor/gateway/shard/stopHeartbeating.ts
vendored
@@ -1,9 +1,9 @@
import { Shard } from "./types.ts";

export function stopHeartbeating(shard: Shard): void {
    // Clear the regular heartbeat interval.
    clearInterval(shard.heart.intervalId);
    // It's possible that the Shard got closed before the first jittered heartbeat.
    // To be safe we should clear the related timeout too.
    clearTimeout(shard.heart.timeoutId);
}
226
vendor/gateway/shard/types.ts
vendored
@@ -11,138 +11,138 @@ export const DEFAULT_HEARTBEAT_INTERVAL = 45000;
export type Shard = ReturnType<typeof createShard>;

export enum ShardState {
    /** Shard is fully connected to the gateway and receiving events from Discord. */
    Connected = 0,
    /** Shard started to connect to the gateway.
     * This is only used if the shard is not currently trying to identify or resume.
     */
    Connecting = 1,
    /** Shard got disconnected and reconnection actions have been started. */
    Disconnected = 2,
    /** The shard is connected to the gateway but only heartbeating.
     * At this state the shard has not been identified with Discord.
     */
    Unidentified = 3,
    /** Shard is trying to identify with the gateway to create a new session. */
    Identifying = 4,
    /** Shard is trying to resume a session with the gateway. */
    Resuming = 5,
    /** Shard got shut down, either deliberately or due to a not (self) fixable error, and may not attempt to reconnect on its own. */
    Offline = 6,
}

export interface ShardGatewayConfig {
    /** Whether incoming payloads are compressed using zlib.
     *
     * @default false
     */
    compress: boolean;
    /** The calculated intent value of the events which the shard should receive.
     *
     * @default 0
     */
    intents: number;
    /** Identify properties to use */
    properties: {
        /** Operating system the shard runs on.
         *
         * @default "darwin" | "linux" | "windows"
         */
        os: string;
        /** The "browser" where this shard is running on.
         *
         * @default "Discordeno"
         */
        browser: string;
        /** The device on which the shard is running.
         *
         * @default "Discordeno"
         */
        device: string;
    };
    /** Bot token which is used to connect to Discord */
    token: string;
    /** The URL of the gateway which should be connected to.
     *
     * @default "wss://gateway.discord.gg"
     */
    url: string;
    /** The gateway version which should be used.
     *
     * @default 10
     */
    version: number;
}

export interface ShardHeart {
    /** Whether or not the heartbeat was acknowledged by Discord in time. */
    acknowledged: boolean;
    /** Interval between heartbeats requested by Discord. */
    interval: number;
    /** Id of the interval, which is used for sending the heartbeats. */
    intervalId?: number;
    /** Unix timestamp (in milliseconds) when the last heartbeat ACK was received from Discord. */
    lastAck?: number;
    /** Unix timestamp (in milliseconds) when the last heartbeat was sent. */
    lastBeat?: number;
    /** Round trip time (in milliseconds) from Shard to Discord and back.
     * Calculated using the heartbeat system.
     * Note: this value is undefined until the first heartbeat to Discord has happened.
     */
    rtt?: number;
    /** Id of the timeout which is used for sending the first heartbeat to Discord since it's "special". */
    timeoutId?: number;
}

export interface ShardEvents {
    /** A heartbeat has been sent. */
    heartbeat?(shard: Shard): unknown;
    /** A heartbeat ACK was received. */
    heartbeatAck?(shard: Shard): unknown;
    /** Shard has received a Hello payload. */
    hello?(shard: Shard): unknown;
    /** The Shard's session has been invalidated. */
    invalidSession?(shard: Shard, resumable: boolean): unknown;
    /** The shard has started a resume action. */
    resuming?(shard: Shard): unknown;
    /** The shard has successfully resumed an old session. */
    resumed?(shard: Shard): unknown;
    /** Discord has requested the Shard to reconnect. */
    requestedReconnect?(shard: Shard): unknown;
    /** The shard started to connect to Discord's gateway. */
    connecting?(shard: Shard): unknown;
    /** The shard is connected with Discord's gateway. */
    connected?(shard: Shard): unknown;
    /** The shard has been disconnected from Discord's gateway. */
    disconnected?(shard: Shard): unknown;
    /** The shard has started to identify itself to Discord. */
    identifying?(shard: Shard): unknown;
    /** The shard has successfully identified itself with Discord. */
    identified?(shard: Shard): unknown;
    /** The shard has received a message from Discord. */
    message?(shard: Shard, payload: DiscordGatewayPayload): unknown;
}

export enum ShardSocketCloseCodes {
    /** A regular Shard shutdown. */
    Shutdown = 3000,
    /** A resume has been requested and therefore the old connection needs to be closed. */
    ResumeClosingOldConnection = 3024,
    /** Did not receive a heartbeat ACK in time.
     * Closing the shard and creating a new session.
     */
    ZombiedConnection = 3010,
    /** Discordeno's gateway tests have been finished, therefore the Shard can be turned off. */
    TestingFinished = 3064,
    /** Special close code reserved for Discordeno's zero-downtime resharding system. */
    Resharded = 3065,
    /** Shard is re-identifying therefore the old connection needs to be closed. */
    ReIdentifying = 3066,
}

export interface ShardSocketRequest {
    /** The OP-Code for the payload to send. */
    op: GatewayOpcodes;
    /** Payload data. */
    d: unknown;
}
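A hedged sketch of how a consumer might branch on the close codes defined above when a socket closes; the mapping mirrors the enum values, while the wiring itself is illustrative.

function describeCloseCode(code: number): string {
    switch (code) {
        case 3000:
            return "regular shutdown";
        case 3010:
            return "zombied connection (missed heartbeat ACK)";
        case 3024:
            return "old connection closed for a resume";
        case 3064:
            return "gateway tests finished";
        case 3065:
            return "zero-downtime resharding";
        case 3066:
            return "re-identifying";
        default:
            return `unhandled close code ${code}`;
    }
}

console.log(describeCloseCode(3010));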
20
vendor/rest/checkRateLimits.ts
vendored
@@ -2,16 +2,16 @@ import { RestManager } from "./restManager.ts";

/** Check the rate limits for a url or a bucket. */
export function checkRateLimits(rest: RestManager, url: string) {
    const ratelimited = rest.rateLimitedPaths.get(url);
    const global = rest.rateLimitedPaths.get("global");
    const now = Date.now();

    if (ratelimited && now < ratelimited.resetTimestamp) {
        return ratelimited.resetTimestamp - now;
    }
    if (global && now < global.resetTimestamp) {
        return global.resetTimestamp - now;
    }

    return false;
}
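A standalone sketch of the same check with the map shape inlined: return the remaining wait in milliseconds, or false when the path is not limited.

interface RateLimitedPath {
    resetTimestamp: number;
}

function remainingWait(paths: Map<string, RateLimitedPath>, url: string): number | false {
    const now = Date.now();
    const limited = paths.get(url);
    const global = paths.get("global");

    if (limited && now < limited.resetTimestamp) return limited.resetTimestamp - now;
    if (global && now < global.resetTimestamp) return global.resetTimestamp - now;
    return false;
}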
16
vendor/rest/cleanupQueues.ts
vendored
@@ -2,13 +2,13 @@ import { RestManager } from "./restManager.ts";

/** Cleans up the queues by checking if there is nothing left and removing it. */
export function cleanupQueues(rest: RestManager) {
    for (const [key, queue] of rest.pathQueues) {
        rest.debug(`[REST - cleanupQueues] Running for of loop. ${key}`);
        if (queue.requests.length) continue;
        // REMOVE IT FROM CACHE
        rest.pathQueues.delete(key);
    }

    // NO QUEUE LEFT, DISABLE THE QUEUE
    if (!rest.pathQueues.size) rest.processingQueue = false;
}
4
vendor/rest/convertRestError.ts
vendored
@@ -1,6 +1,6 @@
import { RestRequestRejection } from "./rest.ts";

export function convertRestError(errorStack: Error, data: RestRequestRejection): Error {
    errorStack.message = `[${data.status}] ${data.error}\n${data.body}`;
    return errorStack;
}
96
vendor/rest/createRequestBody.ts
vendored
@@ -6,62 +6,62 @@ import { RequestMethod, RestPayload, RestRequest } from "./rest.ts";
/** Creates the request body and headers that are necessary to send a request. Will handle different types of methods and everything necessary for discord. */
// export function createRequestBody(rest: RestManager, queuedRequest: { request: RestRequest; payload: RestPayload }) {
export function createRequestBody(rest: RestManager, options: CreateRequestBodyOptions) {
    const headers: Record<string, string> = {
        "user-agent": USER_AGENT,
    };

    if (!options.unauthorized) headers["authorization"] = `Bot ${rest.token}`;

    // SOMETIMES SPECIAL HEADERS (E.G. CUSTOM AUTHORIZATION) NEED TO BE USED
    if (options.headers) {
        for (const key in options.headers) {
            headers[key.toLowerCase()] = options.headers[key];
        }
    }

    // GET METHODS SHOULD NOT HAVE A BODY
    if (options.method === "GET") {
        options.body = undefined;
    }

    // IF A REASON IS PROVIDED ENCODE IT IN HEADERS
    if (options.body?.reason) {
        headers["X-Audit-Log-Reason"] = encodeURIComponent(options.body.reason as string);
        options.body.reason = undefined;
    }

    // IF A FILE/ATTACHMENT IS PRESENT WE NEED SPECIAL HANDLING
    if (options.body?.file) {
        if (!Array.isArray(options.body.file)) {
            options.body.file = [options.body.file];
        }

        const form = new FormData();

        for (let i = 0; i < (options.body.file as FileContent[]).length; i++) {
            form.append(
                `file${i}`,
                (options.body.file as FileContent[])[i].blob,
                (options.body.file as FileContent[])[i].name,
            );
        }

        form.append("payload_json", JSON.stringify({ ...options.body, file: undefined }));
        options.body.file = form;
    } else if (options.body && !["GET", "DELETE"].includes(options.method)) {
        headers["Content-Type"] = "application/json";
    }

    return {
        headers,
        body: (options.body?.file ?? JSON.stringify(options.body)) as FormData | string,
        method: options.method,
    };
}

export interface CreateRequestBodyOptions {
    headers?: Record<string, string>;
    method: RequestMethod;
    body?: Record<string, unknown>;
    unauthorized?: boolean;
}
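A hedged sketch of the multipart branch above in isolation: files are appended as file0, file1, ..., and the remaining body travels as a payload_json field; FileContent is redefined locally for the sketch.

interface FileContent {
    blob: Blob;
    name: string;
}

function buildMultipartBody(body: Record<string, unknown>, files: FileContent[]): FormData {
    const form = new FormData();

    // Each attachment gets its own numbered field.
    files.forEach((file, i) => form.append(`file${i}`, file.blob, file.name));

    // Everything except the raw files is serialized into payload_json.
    form.append("payload_json", JSON.stringify({ ...body, file: undefined }));

    return form;
}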
142
vendor/rest/processGlobalQueue.ts
vendored
@@ -2,80 +2,80 @@ import { RestManager } from "./restManager.ts";
import { HTTPResponseCodes } from "../types/shared.ts";

export async function processGlobalQueue(rest: RestManager) {
    // IF QUEUE IS EMPTY EXIT
    if (!rest.globalQueue.length) return;
    // IF QUEUE IS ALREADY RUNNING EXIT
    if (rest.globalQueueProcessing) return;

    // SET AS TRUE SO OTHER QUEUES DON'T START
    rest.globalQueueProcessing = true;

    while (rest.globalQueue.length) {
        // IF THE BOT IS GLOBALLY RATE LIMITED TRY AGAIN
        if (rest.globallyRateLimited) {
            setTimeout(() => {
                rest.debug(`[REST - processGlobalQueue] Globally rate limited, running setTimeout.`);
                rest.processGlobalQueue(rest);
            }, 1000);

            // BREAK WHILE LOOP
            break;
        }

        if (rest.invalidRequests === rest.maxInvalidRequests - rest.invalidRequestsSafetyAmount) {
            setTimeout(() => {
                const time = rest.invalidRequestsInterval - (Date.now() - rest.invalidRequestFrozenAt);
                rest.debug(
                    `[REST - processGlobalQueue] Freeze global queue because of invalid requests. Time Remaining: ${
                        time / 1000
                    } seconds.`,
                );
                rest.processGlobalQueue(rest);
            }, 1000);

            // BREAK WHILE LOOP
            break;
        }

        const request = rest.globalQueue.shift();
        // REMOVES ANY POTENTIAL INVALID CONFLICTS
        if (!request) continue;

        // CHECK RATE LIMITS FOR 429 REPEATS
        // IF THIS URL IS STILL RATE LIMITED, TRY AGAIN
        const urlResetIn = rest.checkRateLimits(rest, request.basicURL);
        // IF A BUCKET EXISTS, CHECK THE BUCKET'S RATE LIMITS
        const bucketResetIn = request.payload.bucketId ? rest.checkRateLimits(rest, request.payload.bucketId) : false;

        if (urlResetIn || bucketResetIn) {
            // ONLY ADD TIMEOUT IF ANOTHER QUEUE IS NOT PENDING
            setTimeout(() => {
                rest.debug(`[REST - processGlobalQueue] rate limited, running setTimeout.`);
                // THIS REST IS RATE LIMITED, SO PUSH BACK TO START
                rest.globalQueue.unshift(request);
                // START QUEUE IF NOT STARTED
                rest.processGlobalQueue(rest);
            }, urlResetIn || (bucketResetIn as number));

            continue;
        }

        await rest.sendRequest(rest, {
            url: request.urlToUse,
            method: request.request.method,
            bucketId: request.payload.bucketId,
            reject: request.request.reject,
            respond: request.request.respond,
            retryCount: request.payload.retryCount ?? 0,
            payload: rest.createRequestBody(rest, {
                method: request.request.method,
                body: request.payload.body,
            }),
        })
            // Should be handled in sendRequest, this catch just prevents bots from dying
            .catch(() => null);
    }

    // ALLOW OTHER QUEUES TO START WHEN NEW REQUEST IS MADE
    rest.globalQueueProcessing = false;
}
88
vendor/rest/processQueue.ts
vendored
@@ -2,56 +2,56 @@ import { RestManager } from "./restManager.ts";

/** Processes the queue by looping over each path separately until the queues are empty. */
export function processQueue(rest: RestManager, id: string) {
    const queue = rest.pathQueues.get(id);
    if (!queue) return;

    while (queue.requests.length) {
        rest.debug(`[REST - processQueue] Running while loop.`);
        // SELECT THE FIRST ITEM FROM THIS QUEUE
        const queuedRequest = queue.requests[0];
        // IF THIS DOESN'T HAVE ANY ITEMS JUST CANCEL, THE CLEANER WILL REMOVE IT.
        if (!queuedRequest) break;

        const basicURL = rest.simplifyUrl(queuedRequest.request.url, queuedRequest.request.method);

        // IF THIS URL IS STILL RATE LIMITED, TRY AGAIN
        const urlResetIn = rest.checkRateLimits(rest, basicURL);
        if (urlResetIn) {
            // ONLY ADD TIMEOUT IF ANOTHER QUEUE IS NOT PENDING
            if (!queue.isWaiting) {
                queue.isWaiting = true;

                setTimeout(() => {
                    queue.isWaiting = false;

                    rest.debug(`[REST - processQueue] rate limited, running setTimeout.`);
                    rest.processQueue(rest, id);
                }, urlResetIn);
            }

            // BREAK WHILE LOOP
            break;
        }

        // IF A BUCKET EXISTS, CHECK THE BUCKET'S RATE LIMITS
        const bucketResetIn = queuedRequest.payload.bucketId
            ? rest.checkRateLimits(rest, queuedRequest.payload.bucketId)
            : false;
        // THIS BUCKET IS STILL RATE LIMITED, RE-ADD TO QUEUE
        if (bucketResetIn) continue;
        // EXECUTE THE REQUEST

        // CUSTOM HANDLER FOR USER TO LOG OR WHATEVER WHENEVER A FETCH IS MADE
        rest.debug(`[REST - Add To Global Queue] ${JSON.stringify(queuedRequest.payload)}`);
        rest.globalQueue.push({
            ...queuedRequest,
            urlToUse: queuedRequest.request.url,
            basicURL,
        });
        rest.processGlobalQueue(rest);
        queue.requests.shift();
    }

    // ONCE QUEUE IS DONE, WE CAN TRY CLEANING UP
    rest.cleanupQueues(rest);
}
42
vendor/rest/processRateLimitedPaths.ts
vendored
@@ -2,28 +2,28 @@ import { RestManager } from "./restManager.ts";

/** This will create an infinite loop that reruns every 1 second using tail recursion to keep rate limits clean. When a rate limit resets, this will remove it so the queue can proceed. */
export function processRateLimitedPaths(rest: RestManager) {
    const now = Date.now();

    for (const [key, value] of rest.rateLimitedPaths.entries()) {
        rest.debug(`[REST - processRateLimitedPaths] Running for of loop. ${value.resetTimestamp - now}`);
        // IF THE TIME HAS NOT REACHED CANCEL
        if (value.resetTimestamp > now) continue;

        // RATE LIMIT IS OVER, DELETE THE RATE LIMITER
        rest.rateLimitedPaths.delete(key);
        // IF IT WAS GLOBAL ALSO MARK THE GLOBAL VALUE AS FALSE
        if (key === "global") rest.globallyRateLimited = false;
    }

    // ALL PATHS ARE CLEARED CAN CANCEL OUT!
    if (!rest.rateLimitedPaths.size) {
        rest.processingRateLimitedPaths = false;
    } else {
        rest.processingRateLimitedPaths = true;
        // RECHECK IN 1 SECOND
        setTimeout(() => {
            rest.debug(`[REST - processRateLimitedPaths] Running setTimeout.`);
            rest.processRateLimitedPaths(rest);
        }, 1000);
    }
}
54
vendor/rest/processRequest.ts
vendored
@@ -4,33 +4,33 @@ import { RestPayload, RestRequest } from "./rest.ts";

/** Processes a request and assigns it to a queue or creates a queue if none exists for it. */
export function processRequest(rest: RestManager, request: RestRequest, payload: RestPayload) {
    const route = request.url.substring(request.url.indexOf("api/"));
    const parts = route.split("/");
    // REMOVE THE API
    parts.shift();
    // REMOVES THE VERSION NUMBER
    if (parts[0]?.startsWith("v")) parts.shift();
    // SET THE NEW REQUEST URL
    request.url = `${BASE_URL}/v${rest.version}/${parts.join("/")}`;
    // REMOVE THE MAJOR PARAM
    parts.shift();

    const url = rest.simplifyUrl(request.url, request.method);

    const queue = rest.pathQueues.get(url);
    if (queue) {
        queue.requests.push({ request, payload });
    } else {
        // CREATES A NEW QUEUE
        rest.pathQueues.set(url, {
            isWaiting: false,
            requests: [
                {
                    request,
                    payload,
                },
            ],
        });
        rest.processQueue(rest, url);
    }
}
96
vendor/rest/processRequestHeaders.ts
vendored
@@ -2,62 +2,62 @@ import { RestManager } from "./restManager.ts";

/** Processes the rate limit headers and determines if it needs to be rate limited and returns the bucket id if available */
export function processRequestHeaders(rest: RestManager, url: string, headers: Headers) {
    let rateLimited = false;

    // GET ALL NECESSARY HEADERS
    const remaining = headers.get("x-ratelimit-remaining");
    const retryAfter = headers.get("x-ratelimit-reset-after");
    const reset = Date.now() + Number(retryAfter) * 1000;
    const global = headers.get("x-ratelimit-global");
    // undefined override null needed for typings
    const bucketId = headers.get("x-ratelimit-bucket") || undefined;

    // IF THERE IS NO REMAINING RATE LIMIT, MARK IT AS RATE LIMITED
    if (remaining === "0") {
        rateLimited = true;

        // SAVE THE URL AS LIMITED, IMPORTANT FOR NEW REQUESTS BY USER WITHOUT BUCKET
        rest.rateLimitedPaths.set(url, {
            url,
            resetTimestamp: reset,
            bucketId,
        });

        // SAVE THE BUCKET AS LIMITED SINCE DIFFERENT URLS MAY SHARE A BUCKET
        if (bucketId) {
            rest.rateLimitedPaths.set(bucketId, {
                url,
                resetTimestamp: reset,
                bucketId,
            });
        }
    }

    // IF THERE IS NO REMAINING GLOBAL LIMIT, MARK IT RATE LIMITED GLOBALLY
    if (global) {
        const retryAfter = headers.get("retry-after");
        const globalReset = Date.now() + Number(retryAfter) * 1000;
        rest.debug(`[REST = Globally Rate Limited] URL: ${url} | Global Rest: ${globalReset}`);
        rest.globallyRateLimited = true;
        rateLimited = true;

        rest.rateLimitedPaths.set("global", {
            url: "global",
            resetTimestamp: globalReset,
            bucketId,
        });

        if (bucketId) {
            rest.rateLimitedPaths.set(bucketId, {
                url: "global",
                resetTimestamp: globalReset,
                bucketId,
            });
        }
    }

    if (rateLimited && !rest.processingRateLimitedPaths) {
        rest.processRateLimitedPaths(rest);
    }
    return rateLimited ? bucketId : undefined;
}
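A hedged sketch of reading the same rate limit headers in isolation: x-ratelimit-reset-after is given in seconds and is converted into an absolute millisecond timestamp, as the code above does.

function parseRateLimitHeaders(headers: Headers) {
    const remaining = headers.get("x-ratelimit-remaining");
    const resetAfterSeconds = Number(headers.get("x-ratelimit-reset-after"));
    const bucketId = headers.get("x-ratelimit-bucket") ?? undefined;

    return {
        // "0" means the current window for this route is exhausted.
        exhausted: remaining === "0",
        // Convert the relative reset (seconds) into an absolute timestamp in milliseconds.
        resetTimestamp: Date.now() + resetAfterSeconds * 1000,
        bucketId,
    };
}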
30
vendor/rest/rest.ts
vendored
@@ -1,31 +1,31 @@
export interface RestRequest {
    url: string;
    method: RequestMethod;
    respond: (payload: RestRequestResponse) => unknown;
    reject: (payload: RestRequestRejection) => unknown;
}

export interface RestRequestResponse {
    ok: boolean;
    status: number;
    body?: string;
}

export interface RestRequestRejection extends RestRequestResponse {
    error: string;
}

export interface RestPayload {
    bucketId?: string;
    body?: Record<string, unknown>;
    retryCount: number;
    headers?: Record<string, string>;
}

export interface RestRateLimitedPath {
    url: string;
    resetTimestamp: number;
    bucketId?: string;
}

export type RequestMethod = "GET" | "POST" | "PUT" | "DELETE" | "PATCH";
152 vendor/rest/restManager.ts vendored
@@ -16,87 +16,87 @@ import { removeTokenPrefix } from "../util/token.ts";
import { sendRequest } from "./sendRequest.ts";

export function createRestManager(options: CreateRestManagerOptions) {
    const version = options.version || API_VERSION;

    if (options.customUrl) {
        baseEndpoints.BASE_URL = `${options.customUrl}/v${version}`;
    }

    return {
        // current invalid amount
        invalidRequests: 0,
        // max invalid requests allowed until ban
        maxInvalidRequests: 10000,
        // 10 minutes
        invalidRequestsInterval: 600000,
        // timer to reset to 0
        invalidRequestsTimeoutId: 0,
        // how safe to be from max
        invalidRequestsSafetyAmount: 1,
        // when first request in this period was made
        invalidRequestFrozenAt: 0,
        invalidRequestErrorStatuses: [401, 403, 429],
        version,
        token: removeTokenPrefix(options.token),
        maxRetryCount: options.maxRetryCount || 10,
        secretKey: options.secretKey || "discordeno_best_lib_ever",
        customUrl: options.customUrl || "",
        pathQueues: new Map<
            string,
            {
                isWaiting: boolean;
                requests: {
                    request: RestRequest;
                    payload: RestPayload;
                }[];
            }
        >(),
        processingQueue: false,
        processingRateLimitedPaths: false,
        globallyRateLimited: false,
        globalQueue: [] as {
            request: RestRequest;
            payload: RestPayload;
            basicURL: string;
            urlToUse: string;
        }[],
        globalQueueProcessing: false,
        rateLimitedPaths: new Map<string, RestRateLimitedPath>(),
        debug: options.debug || function (_text: string) {},
        checkRateLimits: options.checkRateLimits || checkRateLimits,
        cleanupQueues: options.cleanupQueues || cleanupQueues,
        processQueue: options.processQueue || processQueue,
        processRateLimitedPaths: options.processRateLimitedPaths || processRateLimitedPaths,
        processRequestHeaders: options.processRequestHeaders || processRequestHeaders,
        processRequest: options.processRequest || processRequest,
        createRequestBody: options.createRequestBody || createRequestBody,
        runMethod: options.runMethod || runMethod,
        simplifyUrl: options.simplifyUrl || simplifyUrl,
        processGlobalQueue: options.processGlobalQueue || processGlobalQueue,
        convertRestError: options.convertRestError || convertRestError,
        sendRequest: options.sendRequest || sendRequest,
    };
}

export interface CreateRestManagerOptions {
    token: string;
    customUrl?: string;
    maxRetryCount?: number;
    version?: number;
    secretKey?: string;
    debug?: (text: string) => unknown;
    checkRateLimits?: typeof checkRateLimits;
    cleanupQueues?: typeof cleanupQueues;
    processQueue?: typeof processQueue;
    processRateLimitedPaths?: typeof processRateLimitedPaths;
    processRequestHeaders?: typeof processRequestHeaders;
    processRequest?: typeof processRequest;
    createRequestBody?: typeof createRequestBody;
    runMethod?: typeof runMethod;
    simplifyUrl?: typeof simplifyUrl;
    processGlobalQueue?: typeof processGlobalQueue;
    convertRestError?: typeof convertRestError;
    sendRequest?: typeof sendRequest;
}

export type RestManager = ReturnType<typeof createRestManager>;
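A minimal sketch of standing up a rest manager with these options. Not part of the diff; the import path, the token placeholder, and the chosen API version are assumptions:

import { createRestManager } from "./vendor/rest/restManager.ts"; // assumed path

const rest = createRestManager({
    token: "YOUR_BOT_TOKEN", // placeholder
    version: 10, // optional, falls back to API_VERSION
    debug: (text) => console.log(text), // optional hook for the [REST - ...] log lines
});

Every helper on the returned object (checkRateLimits, processQueue, sendRequest, and so on) can be swapped out through the same options bag, which is handy for tests or for routing through a proxy.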
132 vendor/rest/runMethod.ts vendored
@@ -3,76 +3,76 @@ import { API_VERSION, BASE_URL, baseEndpoints } from "../util/constants.ts";
import { RequestMethod, RestRequestRejection, RestRequestResponse } from "./rest.ts";

export async function runMethod<T = any>(
    rest: RestManager,
    method: RequestMethod,
    route: string,
    body?: unknown,
    options?: {
        retryCount?: number;
        bucketId?: string;
        headers?: Record<string, string>;
    },
): Promise<T> {
    rest.debug(
        `[REST - RequestCreate] Method: ${method} | URL: ${route} | Retry Count: ${
            options?.retryCount ?? 0
        } | Bucket ID: ${options?.bucketId} | Body: ${
            JSON.stringify(
                body,
            )
        }`,
    );

    const errorStack = new Error("Location:");
    // @ts-ignore Breaks deno deploy. Luca said add ts-ignore until it's fixed
    Error.captureStackTrace(errorStack);

    // For proxies we don't need to do any of the legwork so we just forward the request
    if (!baseEndpoints.BASE_URL.startsWith(BASE_URL) && route[0] === "/") {
        const result = await fetch(`${baseEndpoints.BASE_URL}${route}`, {
            body: body ? JSON.stringify(body) : undefined,
            headers: {
                Authorization: rest.secretKey,
                "Content-Type": "application/json",
            },
            method,
        }).catch((error) => {
            errorStack.message = (error as Error)?.message;
            console.error(error);
            throw errorStack;
        });

        if (!result.ok) {
            errorStack.message = result.statusText;
            rest.debug(`[ERROR] ${errorStack.message}`);
            // Closes the response to prevent memory leak
            await result.text();
            throw errorStack;
        }

        return result.status !== 204 ? await result.json() : undefined;
    }

    // No proxy so we need to handle all rate limiting and such
    return new Promise((resolve, reject) => {
        rest.processRequest(
            rest,
            {
                url: route[0] === "/" ? `${BASE_URL}/v${API_VERSION}${route}` : route,
                method,
                reject: (data: RestRequestRejection) => {
                    const restError = rest.convertRestError(errorStack, data);
                    reject(restError);
                },
                respond: (data: RestRequestResponse) =>
                    resolve(data.status !== 204 ? JSON.parse(data.body ?? "{}") : (undefined as unknown as T)),
            },
            {
                bucketId: options?.bucketId,
                body: body as Record<string, unknown> | undefined,
                retryCount: options?.retryCount ?? 0,
                headers: options?.headers,
            },
        );
    });
}
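A usage sketch (the import paths, token placeholder, and route are assumptions; /users/@me is the standard "current user" endpoint). Routes starting with "/" are expanded to `${BASE_URL}/v${API_VERSION}${route}` unless a proxy customUrl has been configured:

import { createRestManager } from "./vendor/rest/restManager.ts"; // assumed path
import { runMethod } from "./vendor/rest/runMethod.ts"; // assumed path

const rest = createRestManager({ token: "YOUR_BOT_TOKEN" }); // placeholder token

// Resolves with the parsed JSON body, or undefined for 204 responses.
const me = await runMethod<{ id: string; username: string }>(rest, "GET", "/users/@me");
console.log(me.id, me.username);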
72 vendor/rest/runProxyMethod.ts vendored
@@ -6,42 +6,42 @@ export type ProxyMethodResponse<T> = Omit<RestRequestResponse | RestRequestRejec
// Left out proxy request, because it's not needed here
// this file could also be moved to a plugin.
export async function runProxyMethod<T = any>(
    rest: RestManager,
    method: "GET" | "POST" | "PUT" | "DELETE" | "PATCH",
    url: string,
    body?: unknown,
    retryCount = 0,
    bucketId?: string,
): Promise<ProxyMethodResponse<T>> {
    rest.debug(
        `[REST - RequestCreate] Method: ${method} | URL: ${url} | Retry Count: ${retryCount} | Bucket ID: ${bucketId} | Body: ${
            JSON.stringify(
                body,
            )
        }`,
    );

    // No proxy so we need to handle all rate limiting and such
    return new Promise((resolve, reject) => {
        rest.processRequest(
            rest,
            {
                url,
                method,
                reject: (data: RestRequestRejection) => {
                    const { body: b, ...r } = data;
                    reject({ body: data.status !== 204 ? JSON.parse(b ?? "{}") : (undefined as unknown as T), ...r });
                },
                respond: (data: RestRequestResponse) => {
                    const { body: b, ...r } = data;
                    resolve({ body: data.status !== 204 ? JSON.parse(b ?? "{}") : (undefined as unknown as T), ...r });
                },
            },
            {
                bucketId,
                body: body as Record<string, unknown> | undefined,
                retryCount,
            },
        );
    });
}
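Unlike runMethod, this variant resolves with the whole response envelope (status plus parsed body) rather than the body alone, which is the shape a proxy wants to forward. A sketch under the same assumptions as above (paths, token, and route are placeholders):

import { createRestManager } from "./vendor/rest/restManager.ts"; // assumed path
import { runProxyMethod } from "./vendor/rest/runProxyMethod.ts"; // assumed path

const rest = createRestManager({ token: "YOUR_BOT_TOKEN" }); // placeholder token

// "users/@me" has no leading slash, so sendRequest prefixes BASE_URL/v{version}/ itself.
const res = await runProxyMethod<{ id: string }>(rest, "GET", "users/@me");
console.log(res.status, res.body?.id);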
274 vendor/rest/sendRequest.ts vendored
@@ -4,152 +4,152 @@ import { RequestMethod } from "./rest.ts";
import { RestManager } from "./restManager.ts";

export interface RestSendRequestOptions {
    url: string;
    method: RequestMethod;
    bucketId?: string;
    reject?: Function;
    respond?: Function;
    retryCount?: number;
    payload?: {
        headers: Record<string, string>;
        body: string | FormData;
    };
}

export async function sendRequest<T>(rest: RestManager, options: RestSendRequestOptions): Promise<T> {
    try {
        // CUSTOM HANDLER FOR USER TO LOG OR WHATEVER WHENEVER A FETCH IS MADE
        rest.debug(`[REST - fetching] URL: ${options.url} | ${JSON.stringify(options)}`);

        const response = await fetch(
            options.url.startsWith(BASE_URL) ? options.url : `${BASE_URL}/v${rest.version}/${options.url}`,
            {
                method: options.method,
                headers: options.payload?.headers,
                body: options.payload?.body,
            },
        );
        rest.debug(`[REST - fetched] URL: ${options.url} | ${JSON.stringify(options)}`);

        const bucketIdFromHeaders = rest.processRequestHeaders(
            rest,
            rest.simplifyUrl(options.url, options.method),
            response.headers,
        );
        // SET THE BUCKET Id IF IT WAS PRESENT
        if (bucketIdFromHeaders) {
            options.bucketId = bucketIdFromHeaders;
        }

        if (response.status < 200 || response.status >= 400) {
            rest.debug(
                `[REST - httpError] Payload: ${JSON.stringify(options)} | Response: ${JSON.stringify(response)}`,
            );

            let error = "REQUEST_UNKNOWN_ERROR";
            switch (response.status) {
                case HTTPResponseCodes.BadRequest:
                    error = "The request was improperly formatted, or the server couldn't understand it.";
                    break;
                case HTTPResponseCodes.Unauthorized:
                    error = "The Authorization header was missing or invalid.";
                    break;
                case HTTPResponseCodes.Forbidden:
                    error = "The Authorization token you passed did not have permission to the resource.";
                    break;
                case HTTPResponseCodes.NotFound:
                    error = "The resource at the location specified doesn't exist.";
                    break;
                case HTTPResponseCodes.MethodNotAllowed:
                    error = "The HTTP method used is not valid for the location specified.";
                    break;
                case HTTPResponseCodes.GatewayUnavailable:
                    error = "There was not a gateway available to process your request. Wait a bit and retry.";
                    break;
            }

            if (
                rest.invalidRequestErrorStatuses.includes(response.status) &&
                !(response.status === 429 && response.headers.get("X-RateLimit-Scope"))
            ) {
                // INCREMENT CURRENT INVALID REQUESTS
                ++rest.invalidRequests;

                if (!rest.invalidRequestsTimeoutId) {
                    rest.invalidRequestsTimeoutId = setTimeout(() => {
                        rest.debug(`[REST - processGlobalQueue] Resetting invalid requests counter in setTimeout.`);
                        rest.invalidRequests = 0;
                        rest.invalidRequestsTimeoutId = 0;
                    }, rest.invalidRequestsInterval);
                }
            }

            // If NOT rate limited remove from queue
            if (response.status !== 429) {
                options.reject?.({
                    ok: false,
                    status: response.status,
                    error,
                    body: response.type ? JSON.stringify(await response.json()) : undefined,
                });

                throw new Error(
                    JSON.stringify({
                        ok: false,
                        status: response.status,
                        error,
                        body: response.type ? JSON.stringify(await response.json()) : undefined,
                    }),
                );
            } else {
                if (options.retryCount && options.retryCount++ >= rest.maxRetryCount) {
                    rest.debug(`[REST - RetriesMaxed] ${JSON.stringify(options)}`);
                    // REMOVE ITEM FROM QUEUE TO PREVENT RETRY
                    options.reject?.({
                        ok: false,
                        status: response.status,
                        error: "The request was rate limited and it maxed out the retries limit.",
                    });

                    // @ts-ignore Code should never reach here
                    return;
                }
            }
        }

        // SOMETIMES DISCORD RETURNS AN EMPTY 204 RESPONSE THAT CAN'T BE MADE TO JSON
        if (response.status === 204) {
            rest.debug(`[REST - FetchSuccess] URL: ${options.url} | ${JSON.stringify(options)}`);
            options.respond?.({
                ok: true,
                status: 204,
            });
            // @ts-ignore 204 will be void
            return;
        } else {
            // CONVERT THE RESPONSE TO JSON
            const json = JSON.stringify(await response.json());

            rest.debug(`[REST - fetchSuccess] ${JSON.stringify(options)}`);
            options.respond?.({
                ok: true,
                status: 200,
                body: json,
            });

            return JSON.parse(json);
        }
    } catch (error) {
        // SOMETHING WENT WRONG, LOG AND RESPOND WITH ERROR
        rest.debug(`[REST - fetchFailed] Payload: ${JSON.stringify(options)} | Error: ${error}`);
        options.reject?.({
            ok: false,
            status: 599,
            error: "Internal Proxy Error",
        });

        throw new Error("Something went wrong in sendRequest", {
            cause: error,
        });
    }
}
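sendRequest is normally driven by the manager's queue rather than called by hand, but invoking it directly shows the shape it expects. A sketch only; the import paths, token, channel id, and message content are placeholders:

import { createRestManager } from "./vendor/rest/restManager.ts"; // assumed path
import { sendRequest } from "./vendor/rest/sendRequest.ts"; // assumed path

const rest = createRestManager({ token: "YOUR_BOT_TOKEN" }); // placeholder token
const channelId = "123456789012345678"; // placeholder snowflake

// Relative URLs are prefixed with BASE_URL/v{version}/ automatically.
const message = await sendRequest<{ id: string; content: string }>(rest, {
    url: `channels/${channelId}/messages`,
    method: "POST",
    payload: {
        headers: {
            Authorization: `Bot ${rest.token}`,
            "Content-Type": "application/json",
        },
        body: JSON.stringify({ content: "hello" }),
    },
});
console.log(message.id);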
28 vendor/rest/simplifyUrl.ts vendored
@@ -5,21 +5,21 @@

/** Split a url to separate rate limit buckets based on major/minor parameters. */
export function simplifyUrl(url: string, method: string) {
    let route = url
        .replace(/\/([a-z-]+)\/(?:[0-9]{17,19})/g, function (match, p) {
            return ["channels", "guilds"].includes(p) ? match : `/${p}/skillzPrefersID`;
        })
        .replace(/\/reactions\/[^/]+/g, "/reactions/skillzPrefersID");

    // GENERAL /reactions and /reactions/emoji/@me share the buckets
    if (route.includes("/reactions")) {
        route = route.substring(0, route.indexOf("/reactions") + "/reactions".length);
    }

    // Delete Message endpoint has its own rate limit
    if (method === "DELETE" && route.endsWith("/messages/skillzPrefersID")) {
        route = method + route;
    }

    return route;
}
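Worked examples of the bucket keys this produces, traced straight from the function above (the snowflakes are made-up 18-digit ids and the import path is assumed):

import { simplifyUrl } from "./vendor/rest/simplifyUrl.ts"; // assumed path

// Channel and guild ids are major parameters and are kept; other ids are collapsed.
simplifyUrl("/channels/123456789012345678/messages/234567890123456789", "GET");
// -> "/channels/123456789012345678/messages/skillzPrefersID"

// Message deletes get their own bucket, keyed by the method.
simplifyUrl("/channels/123456789012345678/messages/234567890123456789", "DELETE");
// -> "DELETE/channels/123456789012345678/messages/skillzPrefersID"

// Every reaction route under the same message shares one bucket.
simplifyUrl("/channels/123456789012345678/messages/234567890123456789/reactions/%F0%9F%91%8D/@me", "PUT");
// -> "/channels/123456789012345678/messages/skillzPrefersID/reactions"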
3754 vendor/types/discord.ts vendored
File diff suppressed because it is too large

2095 vendor/types/shared.ts vendored
File diff suppressed because it is too large
244 vendor/util/bucket.ts vendored
@@ -8,168 +8,168 @@ import { delay } from "./delay.ts";
 * NOTE: This bucket is lazy, meaning it only updates when a related method is called.
 */
export interface LeakyBucket {
    // ----------
    // PROPERTIES
    // ----------

    /** How many tokens this bucket can hold. */
    max: number;
    /** Amount of tokens gained per interval.
     * If bigger than `max` it will be pressed to `max`.
     */
    refillAmount: number;
    /** Interval at which the bucket gains tokens. */
    refillInterval: number;

    // ----------
    // METHODS
    // ----------

    /** Acquire tokens from the bucket.
     * Resolves when the tokens are acquired and available.
     * @param {boolean} [highPriority=false] Whether this acquire should be done asap.
     */
    acquire(amount: number, highPriority?: boolean): Promise<void>;

    /** Returns the number of milliseconds until the next refill. */
    nextRefill(): number;

    /** Current tokens in the bucket. */
    tokens(): number;

    // ----------
    // INTERNAL STATES
    // ----------

    /** @private Internal track of when the last refill of tokens was.
     * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
     */
    lastRefill: number;

    /** @private Internal state of whether currently it is allowed to acquire tokens.
     * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
     */
    allowAcquire: boolean;

    /** @private Internal number of currently available tokens.
     * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
     */
    tokensState: number;

    /** @private Internal array of promises necessary to guarantee no race conditions.
     * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
     */
    waiting: ((_?: unknown) => void)[];
}

export function createLeakyBucket(
    { max, refillInterval, refillAmount, tokens, waiting, ...rest }:
        & Omit<
            PickPartial<
                LeakyBucket,
                "max" | "refillInterval" | "refillAmount"
            >,
            "tokens"
        >
        & {
            /** Current tokens in the bucket.
             * @default max
             */
            tokens?: number;
        },
): LeakyBucket {
    return {
        max,
        refillInterval,
        refillAmount: refillAmount > max ? max : refillAmount,
        lastRefill: performance.now(),
        allowAcquire: true,

        nextRefill: function () {
            return nextRefill(this);
        },

        tokens: function () {
            return updateTokens(this);
        },

        acquire: async function (amount, highPriority) {
            return await acquire(this, amount, highPriority);
        },

        tokensState: tokens ?? max,
        waiting: waiting ?? [],

        ...rest,
    };
}

/** Update the tokens of that bucket.
 * @returns {number} The amount of current available tokens.
 */
function updateTokens(bucket: LeakyBucket): number {
    const timePassed = performance.now() - bucket.lastRefill;
    const missedRefills = Math.floor(timePassed / bucket.refillInterval);

    // The refill shall not exceed the max amount of tokens.
    bucket.tokensState = Math.min(bucket.tokensState + (bucket.refillAmount * missedRefills), bucket.max);
    bucket.lastRefill += bucket.refillInterval * missedRefills;

    return bucket.tokensState;
}

function nextRefill(bucket: LeakyBucket): number {
    // Since this bucket is lazy update the tokens before calculating the next refill.
    updateTokens(bucket);

    return (performance.now() - bucket.lastRefill) + bucket.refillInterval;
}

async function acquire(bucket: LeakyBucket, amount: number, highPriority = false): Promise<void> {
    // To prevent the race condition of 2 acquires happening at once,
    // check whether it's currently allowed to acquire.
    if (!bucket.allowAcquire) {
        // create, push, and wait until the current running acquiring is finished.
        await new Promise((resolve) => {
            if (highPriority) {
                bucket.waiting.unshift(resolve);
            } else {
                bucket.waiting.push(resolve);
            }
        });

        // Somehow another acquire has started,
        // so need to wait again.
        if (!bucket.allowAcquire) {
            return await acquire(bucket, amount);
        }
    }

    bucket.allowAcquire = false;
    // Since the bucket is lazy update the tokens now,
    // and also get the current amount of available tokens
    let currentTokens = updateTokens(bucket);

    // It's possible that more than available tokens have been acquired,
    // so calculate the amount of milliseconds to wait until this acquire is good to go.
    if (currentTokens < amount) {
        const tokensNeeded = amount - currentTokens;
        let refillsNeeded = Math.ceil(tokensNeeded / bucket.refillAmount);

        const waitTime = bucket.refillInterval * refillsNeeded;
        await delay(waitTime);

        // Update the tokens again to ensure nothing has been missed.
        updateTokens(bucket);
    }

    // In order to not subtract too much from the tokens,
    // calculate what is actually needed to subtract.
    const toSubtract = (amount % bucket.refillAmount) || amount;
    bucket.tokensState -= toSubtract;

    // Allow the next acquire to happen.
    bucket.allowAcquire = true;
    // If there is an acquire waiting, let it continue.
    bucket.waiting.shift()?.();
}
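A small sketch of driving such a bucket (the limits are illustrative values, not taken from the diff, and the import path is assumed):

import { createLeakyBucket } from "./vendor/util/bucket.ts"; // assumed path

// Hypothetical limit: 120 tokens, fully refilled every 60 seconds.
const bucket = createLeakyBucket({ max: 120, refillInterval: 60000, refillAmount: 120 });

await bucket.acquire(1); // resolves immediately while tokens remain, otherwise waits for a refill
console.log(bucket.tokens()); // tokens are recomputed lazily on each call
console.log(bucket.nextRefill()); // milliseconds until the next refill

Because the bucket is lazy there is no background timer: acquire, tokens, and nextRefill all run updateTokens first.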
168 vendor/util/collection.ts vendored
@@ -1,100 +1,100 @@
export class Collection<K, V> extends Map<K, V> {
    maxSize: number | undefined;

    constructor(entries?: (readonly (readonly [K, V])[] | null) | Map<K, V>, options?: CollectionOptions<K, V>) {
        super(entries ?? []);

        this.maxSize = options?.maxSize;
    }

    set(key: K, value: V) {
        // When this collection is maxSized make sure we can add first
        if ((this.maxSize || this.maxSize === 0) && this.size >= this.maxSize) {
            return this;
        }

        return super.set(key, value);
    }

    forceSet(key: K, value: V) {
        return super.set(key, value);
    }

    array() {
        return [...this.values()];
    }

    /** Retrieve the value of the first element in this collection */
    first(): V | undefined {
        return this.values().next().value;
    }

    last(): V | undefined {
        return [...this.values()][this.size - 1];
    }

    random(): V | undefined {
        const array = [...this.values()];
        return array[Math.floor(Math.random() * array.length)];
    }

    find(callback: (value: V, key: K) => boolean) {
        for (const key of this.keys()) {
            const value = this.get(key)!;
            if (callback(value, key)) return value;
        }
        // If nothing matched
        return;
    }

    filter(callback: (value: V, key: K) => boolean) {
        const relevant = new Collection<K, V>();
        this.forEach((value, key) => {
            if (callback(value, key)) relevant.set(key, value);
        });

        return relevant;
    }

    map<T>(callback: (value: V, key: K) => T) {
        const results = [];
        for (const key of this.keys()) {
            const value = this.get(key)!;
            results.push(callback(value, key));
        }
        return results;
    }

    some(callback: (value: V, key: K) => boolean) {
        for (const key of this.keys()) {
            const value = this.get(key)!;
            if (callback(value, key)) return true;
        }

        return false;
    }

    every(callback: (value: V, key: K) => boolean) {
        for (const key of this.keys()) {
            const value = this.get(key)!;
            if (!callback(value, key)) return false;
        }

        return true;
    }

    reduce<T>(callback: (accumulator: T, value: V, key: K) => T, initialValue?: T): T {
        let accumulator: T = initialValue!;

        for (const key of this.keys()) {
            const value = this.get(key)!;
            accumulator = callback(accumulator, value, key);
        }

        return accumulator;
    }
}

export interface CollectionOptions<K, V> {
    maxSize?: number;
}
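A usage sketch of the size-capped behaviour (assumed import path; keys and values are arbitrary):

import { Collection } from "./vendor/util/collection.ts"; // assumed path

const users = new Collection<string, { name: string }>(null, { maxSize: 2 });
users.set("1", { name: "Alice" });
users.set("2", { name: "Bob" });
users.set("3", { name: "Carol" }); // silently ignored: maxSize already reached
users.forceSet("3", { name: "Carol" }); // bypasses the cap

console.log(users.size); // 3
console.log(users.map((user) => user.name)); // ["Alice", "Bob", "Carol"]
console.log(users.find((user) => user.name === "Bob")); // { name: "Bob" }

Note that set fails silently (it just returns the collection) once the cap is hit, so callers that must insert use forceSet.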
4 vendor/util/constants.ts vendored
@@ -16,8 +16,8 @@ export const IMAGE_BASE_URL = "https://cdn.discordapp.com";

// This can be modified by big brain bots and use a proxy
export const baseEndpoints = {
    BASE_URL: `${BASE_URL}/v${API_VERSION}`,
    CDN_URL: IMAGE_BASE_URL,
};

export const SLASH_COMMANDS_NAME_REGEX = /^[-_\p{L}\p{N}\p{sc=Deva}\p{sc=Thai}]{1,32}$/u;
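The exported name regex accepts 1 to 32 characters drawn from letters, digits, dashes, underscores, and Devanagari or Thai script characters, so a quick validity check looks like this (import path assumed):

import { SLASH_COMMANDS_NAME_REGEX } from "./vendor/util/constants.ts"; // assumed path

SLASH_COMMANDS_NAME_REGEX.test("ping"); // true
SLASH_COMMANDS_NAME_REGEX.test("my command"); // false: spaces are not allowed
SLASH_COMMANDS_NAME_REGEX.test("a".repeat(33)); // false: longer than 32 characters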
10 vendor/util/delay.ts vendored
@@ -1,8 +1,8 @@
/** Pause the execution for a given amount of milliseconds. */
export function delay(ms: number): Promise<void> {
    return new Promise((res): number =>
        setTimeout((): void => {
            res();
        }, ms)
    );
}
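Usage is as simple as it looks; the leaky bucket above awaits it while waiting out a refill interval (import path assumed):

import { delay } from "./vendor/util/delay.ts"; // assumed path

await delay(250); // resolves after roughly 250 ms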
14 vendor/util/token.ts vendored
@@ -1,14 +1,14 @@
/** Removes the Bot before the token. */
export function removeTokenPrefix(token?: string, type: "GATEWAY" | "REST" = "REST"): string {
    // If no token is provided, throw an error
    if (!token) throw new Error(`The ${type} was not given a token. Please provide a token and try again.`);
    // If the token does not have a prefix just return token
    if (!token.startsWith("Bot ")) return token;
    // Remove the prefix and return only the token.
    return token.substring(token.indexOf(" ") + 1);
}

/** Get the bot id from the bot token. WARNING: Discord staff has mentioned this may not be stable forever. Use at your own risk. However, note for over 5 years this has never broken. */
export function getBotIdFromToken(token: string) {
    return BigInt(atob(token.split(".")[0]));
}
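A sketch of what these helpers do with a made-up token (the value below is fake and the import path is assumed):

import { getBotIdFromToken, removeTokenPrefix } from "./vendor/util/token.ts"; // assumed path

removeTokenPrefix("Bot MTIz.fake.token"); // "MTIz.fake.token"
removeTokenPrefix("MTIz.fake.token"); // returned unchanged, there is no prefix to strip
// removeTokenPrefix(undefined, "GATEWAY") would throw: "The GATEWAY was not given a token. ..."

// The first token segment is the base64-encoded bot id.
getBotIdFromToken("MTIz.fake.token"); // 123n, since atob("MTIz") === "123"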