Initial commit'

This commit is contained in:
Yuzu 2022-06-19 14:44:13 -05:00
parent b32bda0978
commit 17f66a0a46
57 changed files with 6865 additions and 0 deletions

0
mod.ts Normal file
View File

3
vendor/external.ts vendored Normal file
View File

@ -0,0 +1,3 @@
// Barrel file for the vendored dependency: re-exports the gateway, REST, and
// shared-types modules so consumers can import everything from one place.
export * from "./gateway/mod.ts";
export * from "./rest/mod.ts";
export * from "./types/mod.ts";

61
vendor/gateway/README.md vendored Normal file
View File

@ -0,0 +1,61 @@
# Standalone WS / Proxy WS
This WS service is meant for ADVANCED DEVELOPERS ONLY!
## Benefits
- **Zero Downtime Updates**:
- Your bot can be updated in a matter of seconds. With normal sharding, you have to restart which also has to process
identifying all your shards with a 1/~5s rate limit. With WS handling moved to a proxy process, this allows you to
instantly get the bot code restarted without any concerns of delays. If you have a bot on 200,000 servers normally
this would mean a 20 minute delay to restart your bot if you made a small change and restarted.
- **Zero Downtime Resharding**:
- Discord stops letting your bot get added to new servers at certain points in time. For example, suppose you had
150,000 servers running 150 shards. The maximum amount of servers your shards could hold is 150 \* 2500 = 375,000.
If your bot reaches this, it can no longer join new servers until it re-shards.
- DD proxy provides 2 types of re-sharding. Automated and manual. You can also have both.
- `Automated`: This system will automatically begin a Zero-downtime resharding process behind the scenes when you
reach 80% of your maximum servers allowed by your shards. For example, since 375,000 was the max, at 300,000 we
would begin re-sharding behind the scenes with `ZERO DOWNTIME`.
- 80% of maximum servers reached (This 80% threshold is customizable.)
- Identify limits have room to allow re-sharding. (Also customizable)
- `Manual`: You can also trigger this manually should you choose.
- **Horizontal Scaling**:
- The proxy system allows you to scale the bot horizontally. When you reach a huge size, you can either keep spending
more money to keep beefing up your server or you can buy several cheaper servers and scale horizontally. The proxy
means you can have WS handling on a completely separate system.
- **No Loss Restarts**:
- When you restart a bot without the proxy system, normally you would lose many events. Users may be using commands or
messages are sent that will not be filtered. As your bot grows, this number rises dramatically. Users may join who
won't get the auto-roles or any other actions your bot should take. With the proxy system, you can keep restarting
your bot and never lose any events. Events will be put into a queue while your bot is down (max size of the queue is
customizable), once the bot is available the queue will begin processing all events.
- **Controllers**:
- The controller aspect gives you full control over everything inside the proxy. You can provide a function to simply
override the handler. For example, if you would like a certain function to do something different, instead of having
to fork and maintain your fork, you can just provide a function to override.
- **Clustering With Workers**:
- Take full advantage of all your CPU cores by using workers to spread the load. Control how many shards per worker
and how many workers to maximize efficiency!
## Usage
```ts
createGatewayManager({
// TODO: (docs) Fill this out
});
```
## API/Docs
// TODO: (docs) Fill this out. List all props/methods.

7
vendor/gateway/calculateShardId.ts vendored Normal file
View File

@ -0,0 +1,7 @@
import { GatewayManager } from "./manager/gatewayManager.ts";
/** Calculate the id of the shard which handles the given guild.
 *
 * Uses Discord's sharding formula: `shard_id = (guild_id >> 22) % num_shards`.
 */
export function calculateShardId(gateway: GatewayManager, guildId: bigint) {
  // With a single shard every guild is handled by shard 0.
  if (gateway.manager.totalShards === 1) return 0;

  // BUGFIX: the modulus must be the full shard count. The previous
  // `totalShards - 1` routed guilds to the wrong shard (and mapped every
  // guild to shard 0 when running exactly 2 shards).
  return Number((guildId >> 22n) % BigInt(gateway.manager.totalShards));
}

View File

@ -0,0 +1,16 @@
import { GatewayManager } from "./gatewayManager.ts";
/** Handler used to determine max number of shards to use based upon the max concurrency. */
export function calculateTotalShards(gateway: GatewayManager): number {
  // Bots under 100k servers do not have access to total shards.
  if (gateway.manager.totalShards < 100) return gateway.manager.totalShards;

  // Calculate a multiple of `maxConcurrency` which can be used to connect to the gateway.
  // If `maxConcurrency` is 1 we can safely use 16.
  const multiple = gateway.gatewayBot.sessionStartLimit.maxConcurrency === 1
    ? 16
    : gateway.gatewayBot.sessionStartLimit.maxConcurrency;

  // BUGFIX: round totalShards UP to the nearest multiple of `multiple`.
  // The previous code multiplied by `maxConcurrency` instead of the divisor,
  // so with `maxConcurrency === 1` it returned `ceil(totalShards / 16)` —
  // roughly one sixteenth of the shards actually required.
  return Math.ceil(gateway.manager.totalShards / multiple) * multiple;
}

View File

@ -0,0 +1,13 @@
import { GatewayManager } from "./gatewayManager.ts";
/** Determine which worker should own the given shard. */
export function calculateWorkerId(manager: GatewayManager, shardId: number) {
  // Each worker owns `shardsPerWorker` consecutive shard ids; integer-divide
  // to find the worker this shard falls into.
  const candidate = Math.floor(shardId / manager.shardsPerWorker);

  // Worker ids run from 0 to totalWorkers - 1. Shards that would overflow the
  // configured worker range are assigned to the last worker, which by design
  // accepts an unlimited number of shards.
  const lastWorkerId = manager.totalWorkers - 1;

  return candidate > lastWorkerId ? lastWorkerId : candidate;
}

294
vendor/gateway/manager/gatewayManager.ts vendored Normal file
View File

@ -0,0 +1,294 @@
import { DiscordGatewayPayload } from "../../types/discord.ts";
import { GatewayBot, PickPartial } from "../../types/shared.ts";
import { LeakyBucket } from "../../util/bucket.ts";
import { CreateShard, createShard } from "../shard/createShard.ts";
import { Shard, ShardGatewayConfig } from "../shard/types.ts";
import { calculateTotalShards } from "./calculateTotalShards.ts";
import { calculateWorkerId } from "./calculateWorkerId.ts";
// import {
// markNewGuildShardId,
// resharder,
// resharderCloseOldShards,
// resharderIsPending,
// reshardingEditGuildShardIds,
// } from "./resharder.ts";
import { spawnShards } from "./spawnShards.ts";
import { prepareBuckets } from "./prepareBuckets.ts";
import { tellWorkerToIdentify } from "./tellWorkerToIdentify.ts";
import { createShardManager, ShardManager } from "./shardManager.ts";
import { stop } from "./stop.ts";
export type GatewayManager = ReturnType<typeof createGatewayManager>;
/** Create a new Gateway Manager.
 *
 * @param options: Customize every bit of the manager. If something is not
 * provided, it will fallback to a default which should be suitable for most
 * bots.
 */
export function createGatewayManager(
  options: PickPartial<CreateGatewayManager, "handleDiscordPayload" | "gatewayBot" | "gatewayConfig">,
) {
  // Resolve every overridable handler once so the manager below closes over
  // either the caller's custom implementation or the library default.
  const prepareBucketsOverwritten = options.prepareBuckets ?? prepareBuckets;
  const spawnShardsOverwritten = options.spawnShards ?? spawnShards;
  const stopOverwritten = options.stop ?? stop;
  const tellWorkerToIdentifyOverwritten = options.tellWorkerToIdentify ?? tellWorkerToIdentify;
  const calculateTotalShardsOverwritten = options.calculateTotalShards ?? calculateTotalShards;
  const calculateWorkerIdOverwritten = options.calculateWorkerId ?? calculateWorkerId;

  const totalShards = options.totalShards ?? options.gatewayBot.shards ?? 1;

  const gatewayManager = {
    // ----------
    // PROPERTIES
    // ----------

    /** The max concurrency buckets.
     * Those will be created when the `spawnShards` (which calls `prepareBuckets` under the hood) function gets called.
     */
    buckets: new Map<
      number,
      {
        workers: { id: number; queue: number[] }[];
        leak: LeakyBucket;
      }
    >(),
    /** Id of the first Shard which should get controlled by this manager.
     *
     * NOTE: This is intended for testing purposes
     * if big bots want to test the gateway on smaller scale.
     * This is not recommended to be used in production.
     */
    firstShardId: options.firstShardId ?? 0,
    /** Important data which is used by the manager to connect shards to the gateway. */
    gatewayBot: options.gatewayBot,
    /** Id of the last Shard which should get controlled by this manager.
     *
     * NOTE: This is intended for testing purposes
     * if big bots want to test the gateway on smaller scale.
     * This is not recommended to be used in production.
     */
    // `totalShards - 1` is a number and never nullish, so the old trailing
    // `?? 1` fallback was dead code and has been removed.
    lastShardId: options.lastShardId ?? totalShards - 1,
    /** This is where the Shards get stored.
     * This will not be used when having a custom workers solution.
     */
    manager: {} as ShardManager,
    /** Delay in milliseconds to wait before spawning next shard.
     * OPTIMAL IS ABOVE 5100. YOU DON'T WANT TO HIT THE RATE LIMIT!!!
     */
    spawnShardDelay: options.spawnShardDelay ?? 5300,
    /** How many Shards should get assigned to a Worker.
     *
     * IMPORTANT: Discordeno will NOT spawn Workers for you.
     * Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
     * Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
     *
     * NOTE: The last Worker will IGNORE this value,
     * which means that the last worker can get assigned an unlimited amount of shards.
     * This is not a bug but intended behavior and means you have to assign more workers to this manager.
     */
    shardsPerWorker: options.shardsPerWorker ?? 25,
    /** The total amount of Workers which get controlled by this manager.
     *
     * IMPORTANT: Discordeno will NOT spawn Workers for you.
     * Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
     * Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
     */
    totalWorkers: options.totalWorkers ?? 4,

    // ----------
    // METHODS
    // ----------

    /** Prepares the buckets for identifying.
     *
     * NOTE: Most of the time this function does not need to be called,
     * since it gets called by the `spawnShards` function indirectly.
     */
    prepareBuckets: function () {
      return prepareBucketsOverwritten(this);
    },
    /** This function starts to spawn the Shards assigned to this manager.
     *
     * The managers `buckets` will be created and
     *
     * if `resharding.useOptimalLargeBotSharding` is set to true,
     * `totalShards` gets double checked and adjusted accordingly if wrong.
     */
    spawnShards: function () {
      return spawnShardsOverwritten(this);
    },
    /** Stop the gateway. This closes all shards. */
    stop: function (code: number, reason: string) {
      return stopOverwritten(this, code, reason);
    },
    /** Tell the Worker with this Id to identify this Shard.
     *
     * Useful if a custom Worker solution should be used.
     *
     * IMPORTANT: Discordeno will NOT spawn Workers for you.
     * Instead you have to overwrite the `tellWorkerToIdentify` function to make that for you.
     * Look at the [BigBot template gateway solution](https://github.com/discordeno/discordeno/tree/main/template/bigbot/src/gateway) for reference.
     */
    tellWorkerToIdentify: function (workerId: number, shardId: number, bucketId: number) {
      return tellWorkerToIdentifyOverwritten(this, workerId, shardId, bucketId);
    },

    // TODO: fix debug
    /** Handle the different logs. Used for debugging. */
    debug: options.debug || function () {},

    // /** The methods related to resharding. */
    // resharding: {
    //   /** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
    //   useOptimalLargeBotSharding: options.resharding?.useOptimalLargeBotSharding ?? true,
    //   /** Whether or not to automatically reshard.
    //    *
    //    * @default true
    //    */
    //   reshard: options.resharding?.reshard ?? true,
    //   /** The percentage at which resharding should occur.
    //    *
    //    * @default 80
    //    */
    //   reshardPercentage: options.resharding?.reshardPercentage ?? 80,
    //   /** Handles resharding the bot when necessary. */
    //   resharder: options.resharding?.resharder ?? resharder,
    //   /** Handles checking if all new shards are online in the new gateway. */
    //   isPending: options.resharding?.isPending ?? resharderIsPending,
    //   /** Handles closing all shards in the old gateway. */
    //   closeOldShards: options.resharding?.closeOldShards ?? resharderCloseOldShards,
    //   /** Handles checking if it is time to reshard and triggers the resharder. */
    //   check: options.resharding?.check ?? startReshardingChecks,
    //   /** Handler to mark a guild id with its new shard id in cache. */
    //   markNewGuildShardId: options.resharding?.markNewGuildShardId ?? markNewGuildShardId,
    //   /** Handler to update all guilds in cache with the new shard id. */
    //   editGuildShardIds: options.resharding?.editGuildShardIds ?? reshardingEditGuildShardIds,
    // },

    /** Calculate the amount of Shards which should be used based on the bot's max concurrency. */
    calculateTotalShards: function () {
      return calculateTotalShardsOverwritten(this);
    },
    /** Calculate the Id of the Worker related to this Shard. */
    calculateWorkerId: function (shardId: number) {
      return calculateWorkerIdOverwritten(this, shardId);
    },
  };

  // The shard manager is created after the literal above because its callbacks
  // need to close over the finished `gatewayManager`.
  gatewayManager.manager = createShardManager({
    createShardOptions: options.createShardOptions,
    gatewayConfig: options.gatewayConfig,
    shardIds: [],
    totalShards,

    handleMessage: function (shard, message) {
      return options.handleDiscordPayload(shard, message);
    },

    requestIdentify: async (shardId) => {
      // TODO: improve
      // Wait on the identify rate-limit bucket this shard belongs to.
      await gatewayManager.buckets.get(shardId % gatewayManager.gatewayBot.sessionStartLimit.maxConcurrency)!.leak
        .acquire(1);
    },
  });

  return gatewayManager;
}
/** Options used to configure a Gateway Manager. Every handler can be overridden. */
export interface CreateGatewayManager {
  /** Delay in milliseconds to wait before spawning next shard. OPTIMAL IS ABOVE 5100. YOU DON'T WANT TO HIT THE RATE LIMIT!!! */
  spawnShardDelay: number;
  /** Total amount of shards your bot uses. Useful for zero-downtime updates or resharding. */
  totalShards: number;
  /** The amount of shards to load per worker. */
  shardsPerWorker: number;
  /** The total amount of workers to use for your bot. */
  totalWorkers: number;
  /** Id of the first Shard which should get controlled by this manager.
   *
   * NOTE: This is intended for testing purposes
   * if big bots want to test the gateway on smaller scale.
   * This is not recommended to be used in production.
   */
  firstShardId: number;
  /** Id of the last Shard which should get controlled by this manager.
   *
   * NOTE: This is intended for testing purposes
   * if big bots want to test the gateway on smaller scale.
   * This is not recommended to be used in production.
   */
  lastShardId: number;
  /** Important data which is used by the manager to connect shards to the gateway. */
  gatewayBot: GatewayBot;
  /** Gateway connection configuration (token is required; everything else is optional) used when creating each Shard. */
  gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
  /** Options which are used to create a new shard. */
  createShardOptions?: Omit<CreateShard, "id" | "totalShards" | "requestIdentify" | "gatewayConfig">;
  /** Stored as bucketId: { workers: [workerId, [ShardIds]], createNextShard: boolean } */
  buckets: Map<
    number,
    {
      workers: { id: number; queue: number[] }[];
      leak: LeakyBucket;
    }
  >;

  // METHODS

  /** Prepares the buckets for identifying */
  prepareBuckets: typeof prepareBuckets;
  /** The handler for spawning ALL the shards. */
  spawnShards: typeof spawnShards;
  /** The handler to close all shards. */
  stop: typeof stop;
  /** Sends the discord payload to another server. */
  handleDiscordPayload: (shard: Shard, data: DiscordGatewayPayload) => any;
  /** Tell the worker to begin identifying this shard */
  tellWorkerToIdentify: typeof tellWorkerToIdentify;
  /** Handle the different logs. Used for debugging. */
  debug: (text: GatewayDebugEvents, ...args: any[]) => unknown;
  /** The methods related to resharding. */
  // resharding: {
  //   /** Whether the resharder should automatically switch to LARGE BOT SHARDING when you are above 100K servers. */
  //   useOptimalLargeBotSharding: boolean;
  //   /** Whether or not to automatically reshard. */
  //   reshard: boolean;
  //   /** The percentage at which resharding should occur. */
  //   reshardPercentage: number;
  //   /** Handles resharding the bot when necessary. */
  //   resharder: typeof resharder;
  //   /** Handles checking if all new shards are online in the new gateway. */
  //   isPending: typeof resharderIsPending;
  //   /** Handles closing all shards in the old gateway. */
  //   closeOldShards: typeof resharderCloseOldShards;
  //   /** Handler to mark a guild id with its new shard id in cache. */
  //   markNewGuildShardId: typeof markNewGuildShardId;
  //   /** Handler to update all guilds in cache with the new shard id. */
  //   editGuildShardIds: typeof reshardingEditGuildShardIds;
  // };
  /** Calculates the number of shards to use based on the max concurrency */
  calculateTotalShards: typeof calculateTotalShards;
  /** Calculate the id of the worker related to this Shard. */
  calculateWorkerId: typeof calculateWorkerId;
}
/** Labels of every debug event which can be passed to the manager's `debug` handler. */
export type GatewayDebugEvents =
  | "GW ERROR"
  | "GW CLOSED"
  | "GW CLOSED_RECONNECT"
  | "GW RAW"
  | "GW RECONNECT"
  | "GW INVALID_SESSION"
  | "GW RESUMED"
  | "GW RESUMING"
  | "GW IDENTIFYING"
  | "GW RAW_SEND"
  | "GW MAX REQUESTS"
  | "GW DEBUG"
  | "GW HEARTBEATING"
  | "GW HEARTBEATING_STARTED"
  | "GW HEARTBEATING_DETAILS"
  | "GW HEARTBEATING_CLOSED";

8
vendor/gateway/manager/mod.ts vendored Normal file
View File

@ -0,0 +1,8 @@
// Barrel file: re-exports the gateway manager and all of its handler modules.
export * from "./calculateTotalShards.ts";
export * from "./calculateWorkerId.ts";
export * from "./gatewayManager.ts";
export * from "./prepareBuckets.ts";
export * from "./shardManager.ts";
export * from "./spawnShards.ts";
export * from "./stop.ts";
export * from "./tellWorkerToIdentify.ts";

View File

@ -0,0 +1,47 @@
import { createLeakyBucket } from "../../util/bucket.ts";
import { GatewayManager } from "./gatewayManager.ts";
/** Build the identify rate-limit buckets and assign every managed shard to a
 * worker queue inside its bucket.
 *
 * Throws when a shard id falls outside the configured total shard count or
 * lands in a bucket id that was never created.
 */
export function prepareBuckets(gateway: GatewayManager) {
  const { maxConcurrency } = gateway.gatewayBot.sessionStartLimit;

  // One bucket per max-concurrency slot, each with its own identify limiter.
  for (let bucketId = 0; bucketId < maxConcurrency; ++bucketId) {
    gateway.buckets.set(bucketId, {
      workers: [],
      leak: createLeakyBucket({
        max: 1,
        refillAmount: 1,
        // special number which is proven to be working dont change
        refillInterval: gateway.spawnShardDelay,
      }),
    });
  }

  // ORGANIZE ALL SHARDS INTO THEIR OWN BUCKETS
  for (let shardId = gateway.firstShardId; shardId <= gateway.lastShardId; ++shardId) {
    if (shardId >= gateway.manager.totalShards) {
      throw new Error(
        `Shard (id: ${shardId}) is bigger or equal to the used amount of used shards which is ${gateway.manager.totalShards}`,
      );
    }

    // Shards are distributed round-robin over the buckets.
    const bucketId = shardId % maxConcurrency;
    const bucket = gateway.buckets.get(bucketId);
    if (bucket === undefined) {
      throw new Error(
        `Shard (id: ${shardId}) got assigned to an illegal bucket id: ${bucketId}, expected a bucket id between 0 and ${
          maxConcurrency - 1
        }`,
      );
    }

    // Queue the shard on its worker inside this bucket, creating the worker
    // entry on first use.
    const workerId = gateway.calculateWorkerId(shardId);
    const existing = bucket.workers.find((w) => w.id === workerId);
    if (existing) {
      existing.queue.push(shardId);
    } else {
      bucket.workers.push({ id: workerId, queue: [shardId] });
    }
  }
}

339
vendor/gateway/manager/resharder.ts vendored Normal file
View File

@ -0,0 +1,339 @@
import { GatewayBot } from "../../types/shared.ts";
import { createGatewayManager, GatewayManager } from "./gatewayManager.ts";
export type Resharder = ReturnType<typeof activateResharder>;
export function activateResharder(options: ActivateResharderOptions) {
const resharder = {
// ----------
// PROPERTIES
// ----------
/** Interval in milliseconds of when to check whether it's time to reshard.
*
* @default 28800000 (8 hours)
*/
checkInterval: options.checkInterval || 28800000,
/** Gateway manager which is currently processing all shards and events. */
gateway: options.gatewayManager,
/** Timeout of the reshard checker. */
intervalId: undefined as number | undefined,
/** Percentage at which resharding should occur.
* @default 80
*/
percentage: options.percentage ?? 80,
/** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
useOptimalLargeBotSharding: options.useOptimalLargeBotSharding ?? true,
// ----------
// METHODS
// ----------
/** Activate the resharder and delay the next reshard check. */
activate: function () {
return activate(this);
},
/** Function which is used to fetch the current gateway information of the bot.
* This function is mainly used by the reshard checker.
*/
getGatewayBot: options.getGatewayBot,
/** Reshard the bots gateway. */
reshard: function (gatewayBot: GatewayBot) {
return reshard(this, gatewayBot);
},
tellWorkerToPrepare: options.tellWorkerToPrepare,
};
resharder.activate();
return resharder;
}
// /** The methods related to resharding. */
// resharding: {
// /** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
// useOptimalLargeBotSharding: options.resharding?.useOptimalLargeBotSharding ?? true,
// /** Whether or not to automatically reshard.
// *
// * @default true
// */
// reshard: options.resharding?.reshard ?? true,
// /** The percentage at which resharding should occur.
// *
// * @default 80
// */
// reshardPercentage: options.resharding?.reshardPercentage ?? 80,
// /** Handles resharding the bot when necessary. */
// resharder: options.resharding?.resharder ?? resharder,
// /** Handles checking if all new shards are online in the new gateway. */
// isPending: options.resharding?.isPending ?? resharderIsPending,
// /** Handles closing all shards in the old gateway. */
// closeOldShards: options.resharding?.closeOldShards ?? resharderCloseOldShards,
// /** Handles checking if it is time to reshard and triggers the resharder. */
// check: options.resharding?.check ?? startReshardingChecks,
// /** Handler to mark a guild id with its new shard id in cache. */
// markNewGuildShardId: options.resharding?.markNewGuildShardId ?? markNewGuildShardId,
// /** Handler to update all guilds in cache with the new shard id. */
// editGuildShardIds: options.resharding?.editGuildShardIds ?? reshardingEditGuildShardIds,
// },
export interface ActivateResharderOptions {
/** Interval in milliseconds of when to check whether it's time to reshard.
*
* @default 28800000 (8 hours)
*/
checkInterval?: number;
/** Gateway manager which the resharder should be bound to. */
gatewayManager: GatewayManager;
/** Percentage at which resharding should occur.
* @default 80
*/
percentage?: number;
/** Whether the resharder should automatically switch to LARGE BOT SHARDING when the bot is above 100K servers. */
useOptimalLargeBotSharding?: boolean;
/** Function which can be used to fetch the current gateway information of the bot.
* This function is mainly used by the reshard checker.
*/
getGatewayBot(): Promise<GatewayBot>;
/** Function which is used to tell a Worker that it should identify a resharder Shard to the gateway and wait for further instructions.
* The worker should **NOT** process any events coming from this Shard.
*/
tellWorkerToPrepare(
gatewayManager: GatewayManager,
workerId: number,
shardId: number,
bucketId: number,
): Promise<void>;
}
/** Handler that by default will check to see if resharding should occur. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
export function activate(resharder: Resharder): void {
  // Guard against double activation — two concurrent check timers for the
  // same resharder would trigger duplicate reshards.
  if (resharder.intervalId !== undefined) {
    throw new Error("[RESHARDER] Cannot activate the resharder more than one time.");
  }

  const check = async () => {
    // gateway.debug("GW DEBUG", "[Resharding] Checking if resharding is needed.");
    // TODO: is it possible to route this to REST?
    const current = await resharder.getGatewayBot();

    // Growth (in percent) of the recommended shard count over the count in use.
    const totalInUse = resharder.gateway.manager.totalShards;
    const growth = ((current.shards - totalInUse) / totalInUse) * 100;

    // Below the configured threshold — nothing to do yet.
    if (growth < resharder.percentage) return;

    // Don't have enough identify rate limits to reshard
    if (current.sessionStartLimit.remaining < current.shards) return;

    // MULTI-SERVER BOTS OVERRIDE THIS IF YOU NEED TO RESHARD SERVER BY SERVER
    return resharder.reshard(current);
  };

  resharder.intervalId = setInterval(check, resharder.checkInterval);
}
/** Perform a zero-downtime reshard by spinning up a temporary gateway manager
 * with the new shard count and waiting for its shards to come online.
 *
 * NOTE(review): the polling callback below references `gateway` and
 * `oldGateway`, neither of which is defined in this scope — this body appears
 * half-migrated from the commented-out legacy resharder further down and will
 * throw a ReferenceError if the timer ever fires. Presumably these should be
 * the new manager (`tmpManager`) and `resharder.gateway`; confirm before use.
 */
export async function reshard(resharder: Resharder, gatewayBot: GatewayBot) {
  // oldGateway.debug("GW DEBUG", "[Resharding] Starting the reshard process.");

  // Create a temporary gateway manager for easier handling.
  const tmpManager = createGatewayManager({
    gatewayBot: gatewayBot,
    gatewayConfig: resharder.gateway.manager.gatewayConfig,
    // Events from the new shards are dropped until the switch-over happens.
    handleDiscordPayload: () => {},
    tellWorkerToIdentify: resharder.tellWorkerToPrepare,
  });

  // Begin resharding
  // If more than 100K servers, begin switching to 16x sharding
  if (resharder.useOptimalLargeBotSharding) {
    // gateway.debug("GW DEBUG", "[Resharding] Using optimal large bot sharding solution.");
    // NOTE(review): `calculateTotalShards` on the manager takes no arguments;
    // the extra argument here is ignored at runtime — verify intent.
    tmpManager.manager.totalShards = resharder.gateway.calculateTotalShards(resharder.gateway);
  }

  // NOTE(review): `spawnShards` on the manager also takes no arguments.
  tmpManager.spawnShards(tmpManager);

  return new Promise((resolve) => {
    // TIMER TO KEEP CHECKING WHEN ALL SHARDS HAVE RESHARDED
    const timer = setInterval(async () => {
      // NOTE(review): `gateway`/`oldGateway` are undefined here — see the
      // function-level note above.
      const pending = await gateway.resharding.isPending(gateway, oldGateway);
      // STILL PENDING ON SOME SHARDS TO BE CREATED
      if (pending) return;

      // ENABLE EVENTS ON NEW SHARDS AND IGNORE EVENTS ON OLD
      const oldHandler = oldGateway.handleDiscordPayload;
      gateway.handleDiscordPayload = oldHandler;
      oldGateway.handleDiscordPayload = function (og, data, shardId) {
        // ALLOW EXCEPTION FOR CHUNKING TO PREVENT REQUESTS FREEZING
        if (data.t !== "GUILD_MEMBERS_CHUNK") return;
        oldHandler(og, data, shardId);
      };

      // STOP TIMER
      clearInterval(timer);

      await gateway.resharding.editGuildShardIds();
      await gateway.resharding.closeOldShards(oldGateway);
      gateway.debug("GW DEBUG", "[Resharding] Complete.");
      resolve(gateway);
    }, 30000); // poll every 30 seconds
  }) as Promise<GatewayManager>;
}
// /** The handler to automatically reshard when necessary. */
// export async function resharder(
// oldGateway: GatewayManager,
// results: GatewayBot,
// ) {
// oldGateway.debug("GW DEBUG", "[Resharding] Starting the reshard process.");
// const gateway = createGatewayManager({
// ...oldGateway,
// // RESET THE SETS AND COLLECTIONS
// // cache: {
// // guildIds: new Set(),
// // loadingGuildIds: new Set(),
// // editedMessages: new Collection(),
// // },
// shards: new Collection(),
// // loadingShards: new Collection(),
// buckets: new Collection(),
// // utf8decoder: new TextDecoder(),
// });
// for (const [key, value] of Object.entries(oldGateway)) {
// if (key === "handleDiscordPayload") {
// gateway.handleDiscordPayload = async function (_, data, shardId) {
// if (data.t === "READY") {
// const payload = data.d as DiscordReady;
// await gateway.resharding.markNewGuildShardId(payload.guilds.map((g) => BigInt(g.id)), shardId);
// }
// };
// continue;
// }
// // USE ANY CUSTOMIZED OPTIONS FROM OLD GATEWAY
// // @ts-ignore TODO: fix this dynamical assignment
// gateway[key] = oldGateway[key as keyof typeof oldGateway];
// }
// // Begin resharding
// gateway.maxShards = results.shards;
// // FOR MANUAL SHARD CONTROL, OVERRIDE THIS SHARD ID!
// gateway.lastShardId = oldGateway.lastShardId === oldGateway.maxShards ? gateway.maxShards : oldGateway.lastShardId;
// gateway.shardsRecommended = results.shards;
// gateway.sessionStartLimitTotal = results.sessionStartLimit.total;
// gateway.sessionStartLimitRemaining = results.sessionStartLimit.remaining;
// gateway.sessionStartLimitResetAfter = results.sessionStartLimit.resetAfter;
// gateway.maxConcurrency = results.sessionStartLimit.maxConcurrency;
// // If more than 100K servers, begin switching to 16x sharding
// if (gateway.useOptimalLargeBotSharding) {
// gateway.debug("GW DEBUG", "[Resharding] Using optimal large bot sharding solution.");
// gateway.maxShards = gateway.calculateTotalShards(gateway.maxShards, results.sessionStartLimit.maxConcurrency);
// }
// gateway.spawnShards(gateway, gateway.firstShardId);
// return new Promise((resolve) => {
// // TIMER TO KEEP CHECKING WHEN ALL SHARDS HAVE RESHARDED
// const timer = setInterval(async () => {
// const pending = await gateway.resharding.isPending(gateway, oldGateway);
// // STILL PENDING ON SOME SHARDS TO BE CREATED
// if (pending) return;
// // ENABLE EVENTS ON NEW SHARDS AND IGNORE EVENTS ON OLD
// const oldHandler = oldGateway.handleDiscordPayload;
// gateway.handleDiscordPayload = oldHandler;
// oldGateway.handleDiscordPayload = function (og, data, shardId) {
// // ALLOW EXCEPTION FOR CHUNKING TO PREVENT REQUESTS FREEZING
// if (data.t !== "GUILD_MEMBERS_CHUNK") return;
// oldHandler(og, data, shardId);
// };
// // STOP TIMER
// clearInterval(timer);
// await gateway.resharding.editGuildShardIds();
// await gateway.resharding.closeOldShards(oldGateway);
// gateway.debug("GW DEBUG", "[Resharding] Complete.");
// resolve(gateway);
// }, 30000);
// }) as Promise<GatewayManager>;
// }
/** Handler that by default will check all new shards are online in the new gateway. The handler can be overridden if you have multiple servers to communicate through redis pubsub or whatever you prefer. */
export async function resharderIsPending(
  gateway: GatewayManager,
  oldGateway: GatewayManager,
) {
  // NOTE(review): `gateway.shards` does not exist on the GatewayManager in
  // this commit — shards live at `gateway.manager.shards`; confirm this has
  // been migrated before enabling resharding.
  // NOTE(review): `i < gateway.lastShardId` excludes the last shard id, but
  // the managers elsewhere treat `lastShardId` as inclusive (see
  // prepareBuckets) — presumably this should be `<=`; verify.
  for (let i = gateway.firstShardId; i < gateway.lastShardId; i++) {
    const shard = gateway.shards.get(i);
    // Any shard that is missing or not yet ready keeps the reshard pending.
    if (!shard?.ready) {
      return true;
    }
  }

  return false;
}
/** Handler that by default closes all shards in the old gateway. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
export async function resharderCloseOldShards(oldGateway: GatewayManager) {
  // NOTE(review): this uses a legacy manager surface (`oldGateway.shards`,
  // `shard.processingQueue`, `shard.queue`, `shard.ws`, `oldGateway.closeWS`)
  // which the GatewayManager defined in this commit does not expose — confirm
  // this has been migrated before enabling resharding.
  // SHUT DOWN ALL SHARDS IF NOTHING IN QUEUE
  oldGateway.shards.forEach((shard) => {
    // CLOSE THIS SHARD IT HAS NO QUEUE
    if (!shard.processingQueue && !shard.queue.length) {
      return oldGateway.closeWS(
        shard.ws,
        3066,
        "Shard has been resharded. Closing shard since it has no queue.",
      );
    }

    // IF QUEUE EXISTS GIVE IT 5 MINUTES TO COMPLETE
    setTimeout(() => {
      oldGateway.closeWS(
        shard.ws,
        3066,
        "Shard has been resharded. Delayed closing shard since it had a queue.",
      );
    }, 300000); // 5 minutes
  });
}
// /** Handler that by default will check to see if resharding should occur. Can be overridden if you have multiple servers and you want to communicate through redis pubsub or whatever you prefer. */
// export async function startReshardingChecks(gateway: GatewayManager) {
// gateway.debug("GW DEBUG", "[Resharding] Checking if resharding is needed.");
//
// // TODO: is it possible to route this to REST?
// const results = (await fetch(`https://discord.com/api/gateway/bot`, {
// headers: {
// Authorization: `Bot ${gateway.token}`,
// },
// }).then((res) => res.json()).then((res) => transformGatewayBot(res))) as GatewayBot;
//
// const percentage = ((results.shards - gateway.maxShards) / gateway.maxShards) * 100;
// // Less than necessary% being used so do nothing
// if (percentage < gateway.reshardPercentage) return;
//
// // Don't have enough identify rate limits to reshard
// if (results.sessionStartLimit.remaining < results.shards) return;
//
// // MULTI-SERVER BOTS OVERRIDE THIS IF YOU NEED TO RESHARD SERVER BY SERVER
// return gateway.resharding.resharder(gateway, results);
// }
/** Handler that by default will save the new shard id for each guild this becomes ready in new gateway. This can be overridden to save the shard ids in a redis cache layer or whatever you prefer. These ids will be used later to update all guilds. */
export async function markNewGuildShardId(guildIds: bigint[], shardId: number) {
  // PLACEHOLDER TO LET YOU MARK A GUILD ID AND SHARD ID FOR LATER USE ONCE RESHARDED
  // Intentionally a no-op: override this when you cache guild -> shard mappings.
}
/** Handler that by default does not do anything since by default the library will not cache. */
export async function reshardingEditGuildShardIds() {
  // PLACEHOLDER TO LET YOU UPDATE CACHED GUILDS
  // Intentionally a no-op: override this to rewrite the shard id on cached guilds.
}

122
vendor/gateway/manager/shardManager.ts vendored Normal file
View File

@ -0,0 +1,122 @@
import { DiscordGatewayPayload } from "../../types/discord.ts";
import { PickPartial } from "../../types/shared.ts";
import { Collection } from "../../util/collection.ts";
import { CreateShard, createShard } from "../shard/createShard.ts";
import { Shard, ShardGatewayConfig } from "../shard/types.ts";
// TODO: debug
/** The type of a Shard manager, as returned by {@link createShardManager}.
 * A manager does not own a fixed range of Shards; it manages the Shards provided on create plus any shard for which an identify is requested later.
 * The aim of this is to provide an easy to use manager which can be used by workers or any other kind of separate process.
 */
export type ShardManager = ReturnType<typeof createShardManager>;
/** Create a new Shard manager.
 * This does not manage a specific range of Shards but the provided Shards on create or when an identify is requested.
 * The aim of this is to provide an easy to use manager which can be used by workers or any other kind of separate process.
 */
export function createShardManager(options: CreateShardManager) {
  // Merge the Shard options once, up front, so the Shards created below and
  // the Shards created later in `identify` share the exact same configuration.
  // Previously the initial Shards were built from the raw
  // `options.createShardOptions`, which silently dropped the
  // `events.message` -> `options.handleMessage` fallback.
  const createShardOptions = {
    ...options.createShardOptions,
    events: {
      ...options.createShardOptions?.events,
      // Unless a custom `message` handler was provided, forward raw gateway
      // payloads to the manager level `handleMessage` callback.
      message: options.createShardOptions?.events?.message ?? options.handleMessage,
    },
  };
  return {
    // ----------
    // PROPERTIES
    // ----------
    /** Options which are used to create a new Shard. */
    createShardOptions,
    /** Gateway configuration which is used when creating a Shard. */
    gatewayConfig: options.gatewayConfig,
    /** Managed Shards. */
    shards: new Collection(
      options.shardIds.map((shardId) => {
        const shard = createShard({
          ...createShardOptions,
          id: shardId,
          totalShards: options.totalShards,
          gatewayConfig: options.gatewayConfig,
          requestIdentify: async function () {
            return await options.requestIdentify(shardId);
          },
        });
        return [shardId, shard] as const;
      }),
    ),
    /** Total amount of Shards used by the bot. */
    totalShards: options.totalShards,
    // ----------
    // METHODS
    // ----------
    /** Tell the manager to identify a Shard.
     * If this Shard is not already managed this will also add the Shard to the manager.
     */
    identify: async function (shardId: number) {
      let shard = this.shards.get(shardId);
      if (!shard) {
        // Unknown shard: create it on the fly and start tracking it.
        shard = createShard({
          ...this.createShardOptions,
          id: shardId,
          totalShards: this.totalShards,
          gatewayConfig: this.gatewayConfig,
          requestIdentify: async function () {
            return await options.requestIdentify(shardId);
          },
        });
        this.shards.set(shardId, shard);
      }
      return await shard.identify();
    },
    /** Kill a shard.
     * Close a shards connection to Discord's gateway (if any) and remove it from the manager.
     */
    kill: async function (shardId: number) {
      const shard = this.shards.get(shardId);
      if (!shard) return;
      // Remove the shard from the manager before shutting it down.
      this.shards.delete(shardId);
      return await shard.shutdown();
    },
    /** This function communicates with the parent manager,
     * in order to know whether this manager is allowed to identify a new shard.
     */
    requestIdentify: options.requestIdentify,
  };
}
export interface CreateShardManager {
  // ----------
  // PROPERTIES
  // ----------
  /** Options which are used to create a new Shard. */
  createShardOptions?: Omit<CreateShard, "id" | "totalShards" | "requestIdentify" | "gatewayConfig">;
  /** Gateway configuration which is used when creating a Shard. */
  gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
  /** Ids of the Shards which should be managed. */
  shardIds: number[];
  /** Total amount of Shards used by the bot. */
  totalShards: number;
  // ----------
  // METHODS
  // ----------
  /** This function is used when a shard receives any message from Discord.
   * It acts as the default for `createShardOptions.events.message`.
   */
  handleMessage(shard: Shard, message: DiscordGatewayPayload): unknown;
  /** This function communicates with the parent manager,
   * in order to know whether this manager is allowed to identify a new shard.
   */
  requestIdentify(shardId: number): Promise<void>;
}

32
vendor/gateway/manager/spawnShards.ts vendored Normal file
View File

@ -0,0 +1,32 @@
import { GatewayIntents } from "../../types/shared.ts";
import { createLeakyBucket } from "../../util/bucket.ts";
import { createShard } from "../shard/createShard.ts";
import { Shard } from "../shard/types.ts";
import { createGatewayManager, GatewayManager } from "./gatewayManager.ts";
/** Begin spawning shards.
 * Prepares the identify buckets, then starts identifying every queued shard,
 * bucket by bucket.
 */
export function spawnShards(gateway: GatewayManager) {
  // PREPARES THE MAX SHARD COUNT BY CONCURRENCY
  // if (manager.resharding.useOptimalLargeBotSharding) {
  //   // gateway.debug("GW DEBUG", "[Spawning] Using optimal large bot sharding solution.");
  //   manager.manager.totalShards = manager.calculateTotalShards(
  //     manager,
  //   );
  // }
  // PREPARES ALL SHARDS IN SPECIFIC BUCKETS
  gateway.prepareBuckets();
  // SPREAD THIS OUT TO DIFFERENT WORKERS TO BEGIN STARTING UP
  // NOTE(review): `forEach` with an async callback is deliberate here — each
  // bucket is processed concurrently (fire-and-forget), while the shards
  // inside a single worker's queue are identified strictly one after another
  // via the awaited call below.
  gateway.buckets.forEach(async (bucket, bucketId) => {
    // gateway.debug("GW DEBUG", `2. Running forEach loop in spawnShards function.`);
    for (const worker of bucket.workers) {
      // gateway.debug("GW DEBUG", `3. Running for of loop in spawnShards function.`);
      for (const shardId of worker.queue) {
        await gateway.tellWorkerToIdentify(worker.id, shardId, bucketId);
      }
    }
  });
}

8
vendor/gateway/manager/stop.ts vendored Normal file
View File

@ -0,0 +1,8 @@
import { delay } from "../../util/delay.ts";
import { GatewayManager } from "./gatewayManager.ts";
/** Stop the gateway: ask every managed shard to close its connection,
 * then wait a grace period so the sockets can finish closing.
 */
export async function stop(gateway: GatewayManager, code: number, reason: string) {
  // Close each managed shard's connection with the provided code/reason.
  for (const shard of gateway.manager.shards.values()) {
    shard.close(code, reason);
  }
  // Grace period before returning control to the caller.
  await delay(5000);
}

View File

@ -0,0 +1,13 @@
import { GatewayIntents } from "../../types/shared.ts";
import { createShard } from "../shard/createShard.ts";
import { GatewayManager } from "./gatewayManager.ts";
/** Allows users to hook in and change to communicate to different workers across different servers or anything they like. For example using redis pubsub to talk to other servers. */
export async function tellWorkerToIdentify(
  gateway: GatewayManager,
  _workerId: number,
  shardId: number,
  _bucketId: number,
): Promise<void> {
  // Default implementation: everything runs in this one process, so simply
  // ask the local shard manager to identify the shard. The worker and bucket
  // ids are unused here but kept in the signature for cross-process overrides.
  return await gateway.manager.identify(shardId);
}

2
vendor/gateway/mod.ts vendored Normal file
View File

@ -0,0 +1,2 @@
export * from "./manager/mod.ts";
export * from "./shard/mod.ts";

View File

@ -0,0 +1,9 @@
import { Shard } from "./types.ts";
/** Calculate how many requests can safely be sent per rate limit interval,
 * leaving room for the heartbeats Discord expects us to answer.
 */
export function calculateSafeRequests(shard: Shard) {
  // Reserve twice the number of heartbeats expected per rate limit window.
  // The doubling adds an extra safety layer for Discord's OP 1 heartbeat
  // requests, which we must respond to immediately.
  const reservedForHeartbeats = Math.ceil(shard.rateLimitResetInterval / shard.heart.interval) * 2;
  // Never report a negative request budget.
  return Math.max(shard.maxRequestsPerRateLimitTick - reservedForHeartbeats, 0);
}

7
vendor/gateway/shard/close.ts vendored Normal file
View File

@ -0,0 +1,7 @@
import { Shard } from "./types.ts";
/** Close the shard's websocket connection, if it is currently open. */
export function close(shard: Shard, code: number, reason: string): void {
  const socket = shard.socket;
  // Only an open socket can be closed gracefully; otherwise do nothing.
  if (socket === undefined || socket.readyState !== WebSocket.OPEN) return;
  socket.close(code, reason);
}

34
vendor/gateway/shard/connect.ts vendored Normal file
View File

@ -0,0 +1,34 @@
import { Shard, ShardState } from "./types.ts";
/** Open a websocket connection to Discord's gateway for this shard.
 * Resolves once the socket is open; does NOT identify the shard.
 */
export async function connect(shard: Shard): Promise<void> {
  // Whether this connect was triggered by an identify or resume action,
  // which manage the shard state themselves.
  const partOfIdentifyOrResume = () =>
    shard.state === ShardState.Identifying || shard.state === ShardState.Resuming;
  // Only flag the shard as `Connecting` for a plain connection request.
  if (!partOfIdentifyOrResume()) shard.state = ShardState.Connecting;
  shard.events.connecting?.(shard);
  // The encoding is pinned to json since ETF is not supported.
  const url = `${shard.gatewayConfig.url}/?v=${shard.gatewayConfig.version}&encoding=json`;
  const socket = new WebSocket(url);
  shard.socket = socket;
  // TODO: proper event handling
  socket.onerror = (event) => console.log({ error: event });
  socket.onclose = (event) => shard.handleClose(event);
  socket.onmessage = (message) => shard.handleMessage(message);
  // Resolve once the gateway has acknowledged the connection.
  return new Promise((resolve) => {
    socket.onopen = () => {
      // Same rule as above: identify/resume manage their own state.
      if (!partOfIdentifyOrResume()) shard.state = ShardState.Unidentified;
      shard.events.connected?.(shard);
      resolve();
    };
  });
}

333
vendor/gateway/shard/createShard.ts vendored Normal file
View File

@ -0,0 +1,333 @@
import { identify } from "./identify.ts";
import { handleMessage } from "./handleMessage.ts";
import {
DEFAULT_HEARTBEAT_INTERVAL,
GATEWAY_RATE_LIMIT_RESET_INTERVAL,
MAX_GATEWAY_REQUESTS_PER_INTERVAL,
Shard,
ShardEvents,
ShardGatewayConfig,
ShardHeart,
ShardSocketCloseCodes,
ShardSocketRequest,
ShardState,
} from "./types.ts";
import { startHeartbeating } from "./startHeartbeating.ts";
import { stopHeartbeating } from "./stopHeartbeating.ts";
import { resume } from "./resume.ts";
import { createLeakyBucket, LeakyBucket } from "../../util/bucket.ts";
import { calculateSafeRequests } from "./calculateSafeRequests.ts";
import { send } from "./send.ts";
import { handleClose } from "./handleClose.ts";
import { connect } from "./connect.ts";
import { close } from "./close.ts";
import { shutdown } from "./shutdown.ts";
import { isOpen } from "./isOpen.ts";
import { DiscordGatewayPayload, DiscordStatusUpdate } from "../../types/discord.ts";
import { GatewayIntents, PickPartial } from "../../types/shared.ts";
import { API_VERSION } from "../../util/constants.ts";
// TODO: debug
// TODO: function overwrite
// TODO: improve shard event resolving
/** Create a new Shard object which manages a single gateway connection to Discord. */
export function createShard(
  options: CreateShard,
) {
  // Resolve the overridable functions once up front.
  // This is done for performance reasons.
  const calculateSafeRequestsOverwritten = options.calculateSafeRequests ?? calculateSafeRequests;
  const closeOverwritten = options.close ?? close;
  const connectOverwritten = options.connect ?? connect;
  const identifyOverwritten = options.identify ?? identify;
  const sendOverwritten = options.send ?? send;
  const shutdownOverwritten = options.shutdown ?? shutdown;
  const resumeOverwritten = options.resume ?? resume;
  const handleCloseOverwritten = options.handleClose ?? handleClose;
  const handleMessageOverwritten = options.handleMessage ?? handleMessage;
  const isOpenOverwritten = options.isOpen ?? isOpen;
  const startHeartbeatingOverwritten = options.startHeartbeating ?? startHeartbeating;
  const stopHeartbeatingOverwritten = options.stopHeartbeating ?? stopHeartbeating;
  return {
    // ----------
    // PROPERTIES
    // ----------
    /** The gateway configuration which is used to connect to Discord. */
    gatewayConfig: {
      compress: options.gatewayConfig.compress ?? false,
      intents: options.gatewayConfig.intents ?? 0,
      properties: {
        os: options.gatewayConfig?.properties?.os ?? Deno.build.os,
        browser: options.gatewayConfig?.properties?.browser ?? "Discordeno",
        device: options.gatewayConfig?.properties?.device ?? "Discordeno",
      },
      token: options.gatewayConfig.token,
      url: options.gatewayConfig.url ?? "wss://gateway.discord.gg",
      version: options.gatewayConfig.version ?? API_VERSION,
    } as ShardGatewayConfig,
    /** This contains all the heartbeat information */
    heart: options.heart ?? {
      acknowledged: false,
      interval: DEFAULT_HEARTBEAT_INTERVAL,
    } as ShardHeart,
    /** Id of the shard. */
    id: options.id,
    /** The maximum of requests which can be send to discord per rate limit tick.
     * Typically this value should not be changed.
     */
    maxRequestsPerRateLimitTick: options.maxRequestsPerRateLimitTick ?? MAX_GATEWAY_REQUESTS_PER_INTERVAL,
    /** The previous payload sequence number. */
    // `??` instead of `||`: a sequence number of `0` is valid and must not
    // be clobbered to `null`.
    previousSequenceNumber: options.previousSequenceNumber ?? null,
    /** In which interval (in milliseconds) the gateway resets it's rate limit. */
    rateLimitResetInterval: options.rateLimitResetInterval ?? GATEWAY_RATE_LIMIT_RESET_INTERVAL,
    /** Current session id of the shard if present. */
    sessionId: undefined as string | undefined,
    /** This contains the WebSocket connection to Discord, if currently connected. */
    socket: undefined as WebSocket | undefined,
    /** Current internal state of the shard. */
    state: options.state ?? ShardState.Offline,
    /** The total amount of shards which are used to communicate with Discord. */
    totalShards: options.totalShards,
    // ----------
    // METHODS
    // ----------
    /** The shard related event handlers. */
    events: options.events ?? {} as ShardEvents,
    /** Calculate the amount of requests which can safely be made per rate limit interval,
     * before the gateway gets disconnected due to an exceeded rate limit.
     */
    calculateSafeRequests: function () {
      return calculateSafeRequestsOverwritten(this);
    },
    /** Close the socket connection to discord if present. */
    close: function (code: number, reason: string) {
      return closeOverwritten(this, code, reason);
    },
    /** Connect the shard with the gateway and start heartbeating.
     * This will not identify the shard to the gateway.
     */
    connect: async function () {
      return await connectOverwritten(this);
    },
    /** Identify the shard to the gateway.
     * If not connected, this will also connect the shard to the gateway.
     */
    identify: async function () {
      return await identifyOverwritten(this);
    },
    /** Check whether the connection to Discord is currently open. */
    isOpen: function () {
      return isOpenOverwritten(this);
    },
    /** Function which can be overwritten in order to get the shards presence. */
    // This function allows to be async, in case the devs create the presence based on eg. database values.
    // Passing the shard's id there to make it easier for the dev to use this function.
    makePresence: options.makePresence,
    /** Attempt to resume the previous shards session with the gateway. */
    resume: async function () {
      return await resumeOverwritten(this);
    },
    /** Send a message to Discord.
     * @param highPriority - Whether this message should be send asap.
     */
    send: async function (message: ShardSocketRequest, highPriority: boolean = false) {
      return await sendOverwritten(this, message, highPriority);
    },
    /** Shutdown the shard.
     * Forcefully disconnect the shard from Discord.
     * The shard may not attempt to reconnect with Discord.
     */
    shutdown: async function () {
      return await shutdownOverwritten(this);
    },
    /** @private Internal shard bucket.
     * Only access this if you know what you are doing.
     *
     * Bucket for handling shard request rate limits.
     */
    bucket: options.bucket ?? createLeakyBucket({
      max: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
      refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
      refillAmount: MAX_GATEWAY_REQUESTS_PER_INTERVAL,
    }),
    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Handle a gateway connection close.
     */
    handleClose: async function (close: CloseEvent) {
      return await handleCloseOverwritten(this, close);
    },
    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Handle an incoming gateway message.
     */
    handleMessage: async function (message: MessageEvent<any>) {
      return await handleMessageOverwritten(this, message);
    },
    /** This function communicates with the management process, in order to know whether its free to identify. */
    requestIdentify: async function () {
      return await options.requestIdentify(this.id);
    },
    /** @private Internal state.
     * Only use this if you know what you are doing.
     *
     * Cache for pending gateway requests which should have been send while the gateway went offline.
     */
    // NOTE(review): this queue stores resolver callbacks, while the
    // CreateShard interface declares `offlineSendQueue?: ShardSocketRequest[]`.
    // The two types disagree, so that option is intentionally not honored here.
    offlineSendQueue: [] as ((_?: unknown) => void)[],
    /** @private Internal shard map.
     * Only use this map if you know what you are doing.
     *
     * This is used to resolve internal waiting states.
     * Mapped by SelectedEvents => ResolveFunction
     */
    resolves: options.resolves ??
      new Map<"READY" | "RESUMED" | "INVALID_SESSION", (payload: DiscordGatewayPayload) => void>(),
    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Start sending heartbeat payloads to Discord in the provided interval.
     */
    startHeartbeating: function (interval: number) {
      return startHeartbeatingOverwritten(this, interval);
    },
    /** @private Internal shard function.
     * Only use this function if you know what you are doing.
     *
     * Stop the heartbeating process with discord.
     */
    stopHeartbeating: function () {
      return stopHeartbeatingOverwritten(this);
    },
  };
}
export interface CreateShard {
  /** Id of the shard which should be created. */
  id: number;
  /** Gateway configuration for the shard. */
  gatewayConfig: PickPartial<ShardGatewayConfig, "token">;
  /** The total amount of shards which are used to communicate with Discord. */
  totalShards: number;
  /** This function communicates with the management process, in order to know whether its free to identify.
   * When this function resolves, this means that the shard is allowed to send an identify payload to discord.
   */
  requestIdentify: (shardId: number) => Promise<void>;
  /** Calculate the amount of requests which can safely be made per rate limit interval,
   * before the gateway gets disconnected due to an exceeded rate limit.
   */
  calculateSafeRequests?: typeof calculateSafeRequests;
  /** Close the socket connection to discord if present. */
  close?: typeof close;
  /** Connect the shard with the gateway and start heartbeating.
   * This will not identify the shard to the gateway.
   */
  connect?: typeof connect;
  /** @private Internal shard function.
   * Only use this function if you know what you are doing.
   *
   * Handle a gateway connection close.
   */
  handleClose?: typeof handleClose;
  /** @private Internal shard function.
   * Only use this function if you know what you are doing.
   *
   * Handle an incoming gateway message.
   */
  handleMessage?: typeof handleMessage;
  /** Identify the shard to the gateway.
   * If not connected, this will also connect the shard to the gateway.
   */
  identify?: typeof identify;
  /** Check whether the connection to Discord is currently open. */
  isOpen?: typeof isOpen;
  /** Function which can be overwritten in order to get the shards presence. */
  makePresence?(shardId: number): Promise<DiscordStatusUpdate> | DiscordStatusUpdate;
  /** The maximum of requests which can be send to discord per rate limit tick.
   * Typically this value should not be changed.
   */
  maxRequestsPerRateLimitTick?: number;
  /** The previous payload sequence number. */
  previousSequenceNumber?: number;
  /** In which interval (in milliseconds) the gateway resets it's rate limit. */
  rateLimitResetInterval?: number;
  /** Attempt to resume the previous shards session with the gateway. */
  resume?: typeof resume;
  /** Send a message to Discord.
   * @param highPriority - Whether this message should be send asap.
   */
  send?: typeof send;
  /** Shutdown the shard.
   * Forcefully disconnect the shard from Discord.
   * The shard may not attempt to reconnect with Discord.
   */
  shutdown?: typeof shutdown;
  /** @private Internal shard function.
   * Only use this function if you know what you are doing.
   *
   * Start sending heartbeat payloads to Discord in the provided interval.
   */
  startHeartbeating?: typeof startHeartbeating;
  /** Current internal state of the shard. */
  state?: ShardState;
  /** @private Internal shard function.
   * Only use this function if you know what you are doing.
   *
   * Stop the heartbeating process with discord.
   */
  stopHeartbeating?: typeof stopHeartbeating;
  /** The shard related event handlers. */
  events?: ShardEvents;
  /** This contains all the heartbeat information */
  heart?: ShardHeart;
  /** Bucket for handling shard request rate limits. */
  bucket?: LeakyBucket;
  /** Cache for pending gateway requests which should have been send while the gateway went offline. */
  // NOTE(review): createShard's internal queue stores resolver callbacks
  // (`((_?: unknown) => void)[]`), not `ShardSocketRequest[]` — these types
  // disagree; confirm the intended element type before relying on this option.
  offlineSendQueue?: ShardSocketRequest[];
  /** This is used to resolve internal waiting states.
   * Mapped by SelectedEvents => ResolveFunction
   */
  resolves?: Shard["resolves"];
}

1
vendor/gateway/shard/deps.ts vendored Normal file
View File

@ -0,0 +1 @@
export { decompress_with as decompressWith } from "https://unpkg.com/@evan/wasm@0.0.94/target/zlib/deno.js";

63
vendor/gateway/shard/handleClose.ts vendored Normal file
View File

@ -0,0 +1,63 @@
import { GatewayCloseEventCodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
/** Handle a gateway connection close event.
 * Based on the close code the shard either stays offline, waits for a manual
 * restart, starts a new identify, resumes the session, or treats the closure
 * as fatal and throws.
 */
export async function handleClose(shard: Shard, close: CloseEvent): Promise<void> {
  // gateway.debug("GW CLOSED", { shardId, payload: event });
  // A closed connection can no longer be heartbeated.
  shard.stopHeartbeating();
  switch (close.code) {
    case ShardSocketCloseCodes.TestingFinished: {
      shard.state = ShardState.Offline;
      shard.events.disconnected?.(shard);
      return;
    }
    // On these codes a manual start will be done.
    case ShardSocketCloseCodes.Shutdown:
    case ShardSocketCloseCodes.ReIdentifying:
    case ShardSocketCloseCodes.Resharded:
    case ShardSocketCloseCodes.ResumeClosingOldConnection:
    case ShardSocketCloseCodes.ZombiedConnection: {
      shard.state = ShardState.Disconnected;
      shard.events.disconnected?.(shard);
      // gateway.debug("GW CLOSED_RECONNECT", { shardId, payload: event });
      return;
    }
    // Gateway connection closes which require a new identify.
    case GatewayCloseEventCodes.UnknownOpcode:
    case GatewayCloseEventCodes.NotAuthenticated:
    case GatewayCloseEventCodes.InvalidSeq:
    case GatewayCloseEventCodes.RateLimited:
    case GatewayCloseEventCodes.SessionTimedOut: {
      shard.state = ShardState.Identifying;
      shard.events.disconnected?.(shard);
      return await shard.identify();
    }
    // When these codes are received something went really wrong.
    // On those we cannot start a reconnect attempt.
    case GatewayCloseEventCodes.AuthenticationFailed:
    case GatewayCloseEventCodes.InvalidShard:
    case GatewayCloseEventCodes.ShardingRequired:
    case GatewayCloseEventCodes.InvalidApiVersion:
    case GatewayCloseEventCodes.InvalidIntents:
    case GatewayCloseEventCodes.DisallowedIntents: {
      shard.state = ShardState.Offline;
      shard.events.disconnected?.(shard);
      throw new Error(close.reason || "Discord gave no reason! GG! You broke Discord!");
    }
    // Gateway connection closes on which a resume is allowed.
    case GatewayCloseEventCodes.UnknownError:
    case GatewayCloseEventCodes.DecodeError:
    case GatewayCloseEventCodes.AlreadyAuthenticated:
    default: {
      shard.state = ShardState.Resuming;
      shard.events.disconnected?.(shard);
      return await shard.resume();
    }
  }
}

156
vendor/gateway/shard/handleMessage.ts vendored Normal file
View File

@ -0,0 +1,156 @@
import { DiscordGatewayPayload, DiscordHello, DiscordReady } from "../../types/discord.ts";
import { GatewayOpcodes } from "../../types/shared.ts";
import { createLeakyBucket } from "../../util/bucket.ts";
import { delay } from "../../util/delay.ts";
import { decompressWith } from "./deps.ts";
import { GATEWAY_RATE_LIMIT_RESET_INTERVAL, Shard, ShardState } from "./types.ts";
const decoder = new TextDecoder();
/** Handle an incoming raw websocket message for this shard.
 * Decompresses zlib payloads when compression is enabled, parses the JSON
 * payload, performs the protocol level handling (heartbeats, hello,
 * reconnect/invalid-session, READY/RESUMED bookkeeping) and finally forwards
 * the payload via `shard.events.message`.
 */
export async function handleMessage(shard: Shard, message: MessageEvent<any>): Promise<void> {
  message = message.data;
  // If message compression is enabled,
  // Discord might send zlib compressed payloads.
  if (shard.gatewayConfig.compress && message instanceof Blob) {
    message = decompressWith(
      new Uint8Array(await message.arrayBuffer()),
      0,
      (slice: Uint8Array) => decoder.decode(slice),
    );
  }
  // Safeguard incase decompression failed to make a string.
  if (typeof message !== "string") return;
  const messageData = JSON.parse(message) as DiscordGatewayPayload;
  // gateway.debug("GW RAW", { shardId, payload: messageData });
  // TODO: remove
  // console.log({ messageData: censor(messageData) });
  switch (messageData.op) {
    case GatewayOpcodes.Heartbeat: {
      // TODO: can this actually happen
      if (!shard.isOpen()) return;
      shard.heart.lastBeat = Date.now();
      // Discord randomly sends this requiring an immediate heartbeat back.
      // Using a direct socket.send call here because heartbeat requests are reserved by us.
      shard.socket?.send(
        JSON.stringify({
          op: GatewayOpcodes.Heartbeat,
          d: shard.previousSequenceNumber,
        }),
      );
      shard.events.heartbeat?.(shard);
      break;
    }
    case GatewayOpcodes.Hello: {
      const interval = (messageData.d as DiscordHello).heartbeat_interval;
      shard.startHeartbeating(interval);
      if (shard.state !== ShardState.Resuming) {
        // HELLO has been send on a non resume action.
        // This means that the shard starts a new session,
        // therefore the rate limit interval has been reset too.
        shard.bucket = createLeakyBucket({
          max: shard.calculateSafeRequests(),
          refillInterval: GATEWAY_RATE_LIMIT_RESET_INTERVAL,
          refillAmount: shard.calculateSafeRequests(),
          // Waiting acquires should not be lost on a re-identify.
          waiting: shard.bucket.waiting,
        });
      }
      shard.events.hello?.(shard);
      break;
    }
    case GatewayOpcodes.HeartbeatACK: {
      shard.heart.acknowledged = true;
      shard.heart.lastAck = Date.now();
      // Manually calculating the round trip time for users who need it.
      if (shard.heart.lastBeat) {
        shard.heart.rtt = shard.heart.lastAck - shard.heart.lastBeat;
      }
      shard.events.heartbeatAck?.(shard);
      break;
    }
    case GatewayOpcodes.Reconnect: {
      // gateway.debug("GW RECONNECT", { shardId });
      shard.events.requestedReconnect?.(shard);
      await shard.resume();
      break;
    }
    case GatewayOpcodes.InvalidSession: {
      // gateway.debug("GW INVALID_SESSION", { shardId, payload: messageData });
      const resumable = messageData.d as boolean;
      shard.events.invalidSession?.(shard, resumable);
      // We need to wait for a random amount of time between 1 and 5
      // Reference: https://discord.com/developers/docs/topics/gateway#resuming
      await delay(Math.floor((Math.random() * 4 + 1) * 1000));
      shard.resolves.get("INVALID_SESSION")?.(messageData);
      shard.resolves.delete("INVALID_SESSION");
      // When resumable is false we need to re-identify
      if (!resumable) {
        await shard.identify();
        break;
      }
      // The session is invalid but apparently it is resumable
      await shard.resume();
      break;
    }
  }
  if (messageData.t === "RESUMED") {
    // gateway.debug("GW RESUMED", { shardId });
    shard.state = ShardState.Connected;
    shard.events.resumed?.(shard);
    // Continue the requests which have been queued since the shard went offline.
    shard.offlineSendQueue.map((resolve) => resolve());
    shard.resolves.get("RESUMED")?.(messageData);
    shard.resolves.delete("RESUMED");
  } // Important for future resumes.
  else if (messageData.t === "READY") {
    const payload = messageData.d as DiscordReady;
    // The session id is needed should this session be resumed later on.
    shard.sessionId = payload.session_id;
    shard.state = ShardState.Connected;
    // Continue the requests which have been queued since the shard went offline.
    // Important when this is a re-identify
    shard.offlineSendQueue.map((resolve) => resolve());
    shard.resolves.get("READY")?.(messageData);
    shard.resolves.delete("READY");
  }
  // Update the sequence number if it is present
  // `s` can be either `null` or a `number`.
  // In order to prevent update misses when `s` is `0` we check against null.
  if (messageData.s !== null) {
    shard.previousSequenceNumber = messageData.s;
  }
  // The necessary handling required for the Shards connection has been finished.
  // Now the event can be safely forwarded.
  shard.events.message?.(shard, messageData);
}

50
vendor/gateway/shard/identify.ts vendored Normal file
View File

@ -0,0 +1,50 @@
import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
/** Identify the shard to Discord's gateway.
 * Closes an already connected session first, connects if necessary, waits for
 * an identify slot from the manager, and resolves once READY (or an ignorable
 * INVALID_SESSION) has been received.
 */
export async function identify(shard: Shard): Promise<void> {
  // A new identify has been requested even though there is already a connection open.
  // Therefore we need to close the old connection and heartbeating before creating a new one.
  if (shard.state === ShardState.Connected) {
    // NOTE(review): leftover debug logging — matches the file's "TODO: debug" markers.
    console.log("CLOSING EXISTING SHARD: #" + shard.id);
    shard.close(ShardSocketCloseCodes.ReIdentifying, "Re-identifying closure of old connection.");
  }
  shard.state = ShardState.Identifying;
  shard.events.identifying?.(shard);
  // It is possible that the shard is in Heartbeating state but not identified,
  // so check whether there is already a gateway connection existing.
  // If not we need to create one before we identify.
  if (!shard.isOpen()) {
    await shard.connect();
  }
  // Wait until an identify is free for this shard.
  await shard.requestIdentify();
  // Send the identify payload with high priority so it skips the send queue.
  shard.send({
    op: GatewayOpcodes.Identify,
    d: {
      token: `Bot ${shard.gatewayConfig.token}`,
      compress: shard.gatewayConfig.compress,
      properties: shard.gatewayConfig.properties,
      intents: shard.gatewayConfig.intents,
      shard: [shard.id, shard.totalShards],
      presence: await shard.makePresence?.(shard.id),
    },
  }, true);
  return new Promise((resolve) => {
    shard.resolves.set("READY", () => {
      shard.events.identified?.(shard);
      resolve();
    });
    // When identifying too fast,
    // Discord sends an invalid session payload.
    // This can safely be ignored though and the shard starts a new identify action.
    shard.resolves.set("INVALID_SESSION", () => {
      shard.resolves.delete("READY");
      resolve();
    });
  });
}

5
vendor/gateway/shard/isOpen.ts vendored Normal file
View File

@ -0,0 +1,5 @@
import { Shard } from "./types.ts";
/** Whether this shard currently has an open websocket connection to Discord. */
export function isOpen(shard: Shard): boolean {
  const socket = shard.socket;
  return socket !== undefined && socket.readyState === WebSocket.OPEN;
}

14
vendor/gateway/shard/mod.ts vendored Normal file
View File

@ -0,0 +1,14 @@
export * from "./calculateSafeRequests.ts";
export * from "./close.ts";
export * from "./connect.ts";
export * from "./createShard.ts";
export * from "./handleClose.ts";
export * from "./handleMessage.ts";
export * from "./identify.ts";
export * from "./isOpen.ts";
export * from "./resume.ts";
export * from "./send.ts";
export * from "./shutdown.ts";
export * from "./startHeartbeating.ts";
export * from "./stopHeartbeating.ts";
export * from "./types.ts";

48
vendor/gateway/shard/resume.ts vendored Normal file
View File

@ -0,0 +1,48 @@
import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
/** Attempt to resume this shard's previous gateway session.
 * Falls back to a fresh identify when the shard never had a session.
 * Resolves once RESUMED (or an ignorable INVALID_SESSION) has been received.
 */
export async function resume(shard: Shard): Promise<void> {
  // gateway.debug("GW RESUMING", { shardId });
  // It has been requested to resume the Shards session.
  // It's possible that the shard is still connected with Discord's gateway therefore we need to forcefully close it.
  if (shard.isOpen()) {
    shard.close(ShardSocketCloseCodes.ResumeClosingOldConnection, "Reconnecting the shard, closing old connection.");
  }
  // Shard has never identified, so we cannot resume.
  if (!shard.sessionId) {
    // gateway.debug(
    //   "GW DEBUG",
    //   `[Error] Trying to resume a shard (id: ${shardId}) that was not first identified.`,
    // );
    return await shard.identify();
    // throw new Error(`[SHARD] Trying to resume a shard (id: ${shard.id}) which was never identified`);
  }
  shard.state = ShardState.Resuming;
  // Before we can resume, we need to create a new connection with Discord's gateway.
  await shard.connect();
  // Send the resume payload with high priority so it skips the send queue.
  shard.send({
    op: GatewayOpcodes.Resume,
    d: {
      token: `Bot ${shard.gatewayConfig.token}`,
      session_id: shard.sessionId,
      seq: shard.previousSequenceNumber ?? 0,
    },
  }, true);
  return new Promise((resolve) => {
    shard.resolves.set("RESUMED", () => resolve());
    // If it is attempted to resume with an invalid session id,
    // Discord sends an invalid session payload
    // Not erroring here since it is easy that this happens, also it would be not catchable
    shard.resolves.set("INVALID_SESSION", () => {
      shard.resolves.delete("RESUMED");
      resolve();
    });
  });
}

27
vendor/gateway/shard/send.ts vendored Normal file
View File

@ -0,0 +1,27 @@
import { Shard, ShardSocketRequest } from "./types.ts";
/** Block the caller while the shard is offline.
 * The returned promise's resolver is parked in `shard.offlineSendQueue` and
 * is invoked elsewhere once the shard is back online (on READY/RESUMED).
 */
async function checkOffline(shard: Shard, highPriority: boolean): Promise<void> {
  // Nothing to wait for while the websocket is open.
  if (shard.isOpen()) return;
  await new Promise((resolve) => {
    if (highPriority) {
      // Higher priority requests jump to the front of the queue.
      shard.offlineSendQueue.unshift(resolve);
    } else {
      shard.offlineSendQueue.push(resolve);
    }
  });
}
/** Send a message to Discord through this shard's websocket.
 * Respects the gateway rate limit via the shard's leaky bucket and defers
 * the send while the shard is offline.
 */
export async function send(shard: Shard, message: ShardSocketRequest, highPriority: boolean): Promise<void> {
  // Before acquiring a token from the bucket, check whether the shard is currently offline or not.
  // Else bucket and token wait time just get wasted.
  await checkOffline(shard, highPriority);
  await shard.bucket.acquire(1, highPriority);
  // It's possible, that the shard went offline after a token has been acquired from the bucket.
  await checkOffline(shard, highPriority);
  shard.socket?.send(JSON.stringify(message));
}

6
vendor/gateway/shard/shutdown.ts vendored Normal file
View File

@ -0,0 +1,6 @@
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
/** Shutdown the shard: close the connection permanently and mark it offline.
 * The `Shutdown` close code makes handleClose leave the shard disconnected
 * instead of scheduling any reconnect.
 */
export async function shutdown(shard: Shard): Promise<void> {
  shard.close(ShardSocketCloseCodes.Shutdown, "Shard shutting down.");
  shard.state = ShardState.Offline;
}

View File

@ -0,0 +1,64 @@
import { GatewayOpcodes } from "../../types/shared.ts";
import { Shard, ShardSocketCloseCodes, ShardState } from "./types.ts";
/** Start sending heartbeat payloads to Discord in the provided interval.
 * The first heartbeat is sent after a random jitter as Discord requires;
 * afterwards a regular interval takes over, and a missing heartbeat ACK
 * ("zombied" connection) triggers a close plus re-identify.
 */
export function startHeartbeating(shard: Shard, interval: number) {
  // gateway.debug("GW HEARTBEATING_STARTED", { shardId, interval });
  shard.heart.interval = interval;
  // Only set the shard's state to `Unidentified`
  // if heartbeating has not been started due to an identify or resume action.
  if ([ShardState.Disconnected, ShardState.Offline].includes(shard.state)) {
    shard.state = ShardState.Unidentified;
  }
  // The first heartbeat needs to be send with a random delay between `0` and `interval`
  // Using a `setTimeout(_, jitter)` here to accomplish that.
  // `Math.random()` can be `0` so we use `0.5` if this happens
  // Reference: https://discord.com/developers/docs/topics/gateway#heartbeating
  const jitter = Math.ceil(shard.heart.interval * (Math.random() || 0.5));
  shard.heart.timeoutId = setTimeout(() => {
    // Using a direct socket.send call here because heartbeat requests are reserved by us.
    shard.socket?.send(JSON.stringify({
      op: GatewayOpcodes.Heartbeat,
      d: shard.previousSequenceNumber,
    }));
    shard.heart.lastBeat = Date.now();
    shard.heart.acknowledged = false;
    // After the random heartbeat jitter we can start a normal interval.
    shard.heart.intervalId = setInterval(async () => {
      // gateway.debug("GW DEBUG", `Running setInterval in heartbeat file. Shard: ${shardId}`);
      // gateway.debug("GW HEARTBEATING", { shardId, shard: currentShard });
      // The Shard did not receive a heartbeat ACK from Discord in time,
      // therefore we have to assume that the connection has failed or got "zombied".
      // The Shard needs to start a re-identify action accordingly.
      // Reference: https://discord.com/developers/docs/topics/gateway#heartbeating-example-gateway-heartbeat-ack
      if (!shard.heart.acknowledged) {
        shard.close(
          ShardSocketCloseCodes.ZombiedConnection,
          "Zombied connection, did not receive an heartbeat ACK in time.",
        );
        return await shard.identify();
      }
      shard.heart.acknowledged = false;
      // Using a direct socket.send call here because heartbeat requests are reserved by us.
      shard.socket?.send(
        JSON.stringify({
          op: GatewayOpcodes.Heartbeat,
          d: shard.previousSequenceNumber,
        }),
      );
      shard.heart.lastBeat = Date.now();
      shard.events.heartbeat?.(shard);
    }, shard.heart.interval);
  }, jitter);
}

View File

@ -0,0 +1,9 @@
import { Shard } from "./types.ts";
/** Cancel all heartbeat timers of the given shard. */
export function stopHeartbeating(shard: Shard): void {
  // The very first (jittered) heartbeat is scheduled via setTimeout, so the
  // shard may be closed before it ever fired — cancel that timer too.
  clearTimeout(shard.heart.timeoutId);
  // Cancel the recurring heartbeat interval.
  clearInterval(shard.heart.intervalId);
}

148
vendor/gateway/shard/types.ts vendored Normal file
View File

@ -0,0 +1,148 @@
import { DiscordGatewayPayload } from "../../types/discord.ts";
import { GatewayOpcodes } from "../../types/shared.ts";
import { createShard } from "./createShard.ts";
// TODO: think whether we also need an identifiedShard function
/** Maximum number of gateway commands a shard may send per rate limit window. */
export const MAX_GATEWAY_REQUESTS_PER_INTERVAL = 120;
/** Length of the gateway send rate limit window, in milliseconds. */
export const GATEWAY_RATE_LIMIT_RESET_INTERVAL = 60_000; // 60 seconds
/** Fallback heartbeat interval in milliseconds — presumably used before Discord's Hello supplies the real one; TODO confirm usage. */
export const DEFAULT_HEARTBEAT_INTERVAL = 45000;
/** A Shard is the object produced by `createShard`. */
export type Shard = ReturnType<typeof createShard>;
/** Lifecycle state of a single gateway shard. */
export enum ShardState {
  /** Shard is fully connected to the gateway and receiving events from Discord. */
  Connected = 0,
  /** Shard started to connect to the gateway.
   * This is only used if the shard is not currently trying to identify or resume.
   */
  Connecting = 1,
  /** Shard got disconnected and reconnection actions have been started. */
  Disconnected = 2,
  /** The shard is connected to the gateway but only heartbeating.
   * At this state the shard has not been identified with discord.
   */
  Unidentified = 3,
  /** Shard is trying to identify with the gateway to create a new session. */
  Identifying = 4,
  /** Shard is trying to resume a session with the gateway. */
  Resuming = 5,
  /** Shard got shut down, either deliberately or due to an error it cannot fix on its own, and may not attempt to reconnect on its own. */
  Offline = 6,
}
/** Gateway-related configuration for a single shard. */
export interface ShardGatewayConfig {
  /** Whether incoming payloads are compressed using zlib.
   *
   * @default false
   */
  compress: boolean;
  /** The calculated intent value of the events which the shard should receive.
   *
   * @default 0
   */
  intents: number;
  /** Identify properties to use */
  properties: {
    /** Operating system the shard runs on.
     *
     * @default "darwin" | "linux" | "windows"
     */
    os: string;
    /** The "browser" where this shard is running on.
     *
     * @default "Discordeno"
     */
    browser: string;
    /** The device on which the shard is running.
     *
     * @default "Discordeno"
     */
    device: string;
  };
  /** Bot token which is used to connect to Discord */
  token: string;
  /** The URL of the gateway which should be connected to.
   *
   * @default "wss://gateway.discord.gg"
   */
  url: string;
  /** The gateway version which should be used.
   *
   * @default 10
   */
  version: number;
}
/** Heartbeat bookkeeping for a shard. */
export interface ShardHeart {
  /** Whether or not the heartbeat was acknowledged by Discord in time. */
  acknowledged: boolean;
  /** Interval between heartbeats requested by Discord. */
  interval: number;
  /** Id of the interval, which is used for sending the heartbeats. */
  intervalId?: number;
  /** Unix (in milliseconds) timestamp when the last heartbeat ACK was received from Discord. */
  lastAck?: number;
  /** Unix timestamp (in milliseconds) when the last heartbeat was sent. */
  lastBeat?: number;
  /** Round trip time (in milliseconds) from Shard to Discord and back.
   * Calculated using the heartbeat system.
   * Note: this value is undefined until the first heartbeat to Discord has happened.
   */
  rtt?: number;
  /** Id of the timeout which is used for sending the first heartbeat to Discord since it's "special" (randomly jittered). */
  timeoutId?: number;
}
/** User-overridable lifecycle hooks of a shard. */
export interface ShardEvents {
  /** A heartbeat has been sent. */
  heartbeat?(shard: Shard): unknown;
  /** A heartbeat ACK was received. */
  heartbeatAck?(shard: Shard): unknown;
  /** Shard has received a Hello payload. */
  hello?(shard: Shard): unknown;
  /** The Shard's session has been invalidated. */
  invalidSession?(shard: Shard, resumable: boolean): unknown;
  /** The shard has started a resume action. */
  resuming?(shard: Shard): unknown;
  /** The shard has successfully resumed an old session. */
  resumed?(shard: Shard): unknown;
  /** Discord has requested the Shard to reconnect. */
  requestedReconnect?(shard: Shard): unknown;
  /** The shard started to connect to Discord's gateway. */
  connecting?(shard: Shard): unknown;
  /** The shard is connected with Discord's gateway. */
  connected?(shard: Shard): unknown;
  /** The shard has been disconnected from Discord's gateway. */
  disconnected?(shard: Shard): unknown;
  /** The shard has started to identify itself to Discord. */
  identifying?(shard: Shard): unknown;
  /** The shard has successfully identified itself with Discord. */
  identified?(shard: Shard): unknown;
  /** The shard has received a message from Discord. */
  message?(shard: Shard, payload: DiscordGatewayPayload): unknown;
}
/** Custom (3xxx range) close codes used when this library closes a gateway socket on purpose. */
export enum ShardSocketCloseCodes {
  /** A regular Shard shutdown. */
  Shutdown = 3000,
  /** A resume has been requested and therefore the old connection needs to be closed. */
  ResumeClosingOldConnection = 3024,
  /** Did not receive a heartbeat ACK in time.
   * Closing the shard and creating a new session.
   */
  ZombiedConnection = 3010,
  /** Discordeno's gateway tests have been finished, therefore the Shard can be turned off. */
  TestingFinished = 3064,
  /** Special close code reserved for Discordeno's zero-downtime resharding system. */
  Resharded = 3065,
  /** Shard is re-identifying therefore the old connection needs to be closed. */
  ReIdentifying = 3066,
}
/** A payload to be sent over the gateway socket. */
export interface ShardSocketRequest {
  /** The OP-Code for the payload to send. */
  op: GatewayOpcodes;
  /** Payload data. */
  d: unknown;
}

16
vendor/rest/README.md vendored Normal file
View File

@ -0,0 +1,16 @@
# Discordeno Rest
A standalone, serverless REST module that provides full REST functionality independently of the rest of the library.
- Easily host on any serverless infrastructure.
- Easy to use and setup with Cloudflare Workers (FREE for 100K requests per day!)
- Freedom from global rate limit errors
- As your bot grows, you want to handle global rate limits better. Shards don't communicate fast enough to truly
handle it properly so this allows 1 rest handler across the entire bot.
- In fact, you can host multiple instances of your bot and all connect to the same rest server.
- REST does not rest!
- Separate rest means if your bot for whatever reason crashes, your requests that are queued will still keep going and
will not be lost.
- Seamless updates! When you want to update and reboot the bot, you could potentially lose tons of messages or
responses that are in queue. Using this you could restart your bot without ever worrying about losing any responses.
- Scalability! Scalability! Scalability!

17
vendor/rest/checkRateLimits.ts vendored Normal file
View File

@ -0,0 +1,17 @@
import { RestManager } from "./restManager.ts";
/** Check the rate limits for a url or a bucket.
 * Returns the remaining wait time in milliseconds when limited, or `false` otherwise.
 */
export function checkRateLimits(rest: RestManager, url: string) {
  const now = Date.now();
  const pathLimit = rest.rateLimitedPaths.get(url);
  const globalLimit = rest.rateLimitedPaths.get("global");

  // A limit on this specific url/bucket wins over the global one.
  if (pathLimit && now < pathLimit.resetTimestamp) {
    return pathLimit.resetTimestamp - now;
  }

  // Otherwise fall back to an active global rate limit.
  if (globalLimit && now < globalLimit.resetTimestamp) {
    return globalLimit.resetTimestamp - now;
  }

  // Not rate limited at all.
  return false;
}

14
vendor/rest/cleanupQueues.ts vendored Normal file
View File

@ -0,0 +1,14 @@
import { RestManager } from "./restManager.ts";
/** Cleans up the queues by checking if there is nothing left and removing it. */
export function cleanupQueues(rest: RestManager) {
  for (const [route, queue] of rest.pathQueues.entries()) {
    rest.debug(`[REST - cleanupQueues] Running for of loop. ${route}`);
    // Queues that still hold requests are left alone.
    if (queue.requests.length) continue;
    // Nothing pending for this route anymore — drop the queue from the cache.
    rest.pathQueues.delete(route);
  }

  // With no route queues remaining, mark queue processing as idle.
  if (rest.pathQueues.size === 0) rest.processingQueue = false;
}

6
vendor/rest/convertRestError.ts vendored Normal file
View File

@ -0,0 +1,6 @@
import { RestRequestRejection } from "./rest.ts";
/** Fold the rejection details into a previously captured stack-carrying error and return it. */
export function convertRestError(errorStack: Error, data: RestRequestRejection): Error {
  const { status, error, body } = data;
  errorStack.message = `[${status}] ${error}\n${body}`;
  return errorStack;
}

67
vendor/rest/createRequestBody.ts vendored Normal file
View File

@ -0,0 +1,67 @@
import { RestManager } from "./restManager.ts";
import { FileContent } from "../types/shared.ts";
import { USER_AGENT } from "../util/constants.ts";
import { RequestMethod, RestPayload, RestRequest } from "./rest.ts";
/** Creates the request body and headers that are necessary to send a request. Will handle different types of methods and everything necessary for discord. */
// export function createRequestBody(rest: RestManager, queuedRequest: { request: RestRequest; payload: RestPayload }) {
export function createRequestBody(rest: RestManager, options: CreateRequestBodyOptions) {
const headers: Record<string, string> = {
"user-agent": USER_AGENT,
};
if (!options.unauthorized) headers["authorization"] = `Bot ${rest.token}`;
// SOMETIMES SPECIAL HEADERS (E.G. CUSTOM AUTHORIZATION) NEED TO BE USED
if (options.headers) {
for (const key in options.headers) {
headers[key.toLowerCase()] = options.headers[key];
}
}
// GET METHODS SHOULD NOT HAVE A BODY
if (options.method === "GET") {
options.body = undefined;
}
// IF A REASON IS PROVIDED ENCODE IT IN HEADERS
if (options.body?.reason) {
headers["X-Audit-Log-Reason"] = encodeURIComponent(options.body.reason as string);
options.body.reason = undefined;
}
// IF A FILE/ATTACHMENT IS PRESENT WE NEED SPECIAL HANDLING
if (options.body?.file) {
if (!Array.isArray(options.body.file)) {
options.body.file = [options.body.file];
}
const form = new FormData();
for (let i = 0; i < (options.body.file as FileContent[]).length; i++) {
form.append(
`file${i}`,
(options.body.file as FileContent[])[i].blob,
(options.body.file as FileContent[])[i].name,
);
}
form.append("payload_json", JSON.stringify({ ...options.body, file: undefined }));
options.body.file = form;
} else if (options.body && !["GET", "DELETE"].includes(options.method)) {
headers["Content-Type"] = "application/json";
}
return {
headers,
body: (options.body?.file ?? JSON.stringify(options.body)) as FormData | string,
method: options.method,
};
}
/** Options accepted by `createRequestBody`. */
export interface CreateRequestBodyOptions {
  /** Extra headers to merge in; keys are lower-cased before use. */
  headers?: Record<string, string>;
  /** HTTP method; GET requests have their body stripped. */
  method: RequestMethod;
  /** JSON body; may contain `reason` (moved into a header) and `file` attachments. */
  body?: Record<string, unknown>;
  /** When true, no bot Authorization header is attached. */
  unauthorized?: boolean;
}

14
vendor/rest/mod.ts vendored Normal file
View File

@ -0,0 +1,14 @@
export * from "./checkRateLimits.ts";
export * from "./cleanupQueues.ts";
export * from "./createRequestBody.ts";
export * from "./processGlobalQueue.ts";
export * from "./processQueue.ts";
export * from "./processRateLimitedPaths.ts";
export * from "./processRequest.ts";
export * from "./processRequestHeaders.ts";
export * from "./rest.ts";
export * from "./restManager.ts";
export * from "./runMethod.ts";
export * from "./simplifyUrl.ts";
export * from "./convertRestError.ts";
export * from "./sendRequest.ts";

81
vendor/rest/processGlobalQueue.ts vendored Normal file
View File

@ -0,0 +1,81 @@
import { RestManager } from "./restManager.ts";
import { HTTPResponseCodes } from "../types/shared.ts";
/**
 * Drains the global queue of requests that already passed their per-route queue.
 * Only one drain loop runs at a time (`globalQueueProcessing` guards re-entry);
 * global rate limits and the invalid-request safety freeze pause the loop and
 * reschedule it via setTimeout.
 */
export async function processGlobalQueue(rest: RestManager) {
  // IF QUEUE IS EMPTY EXIT
  if (!rest.globalQueue.length) return;
  // IF QUEUE IS ALREADY RUNNING EXIT
  if (rest.globalQueueProcessing) return;
  // SET AS TRUE SO OTHER QUEUES DON'T START
  rest.globalQueueProcessing = true;
  while (rest.globalQueue.length) {
    // IF THE BOT IS GLOBALLY RATE LIMITED TRY AGAIN
    if (rest.globallyRateLimited) {
      setTimeout(() => {
        rest.debug(`[REST - processGlobalQueue] Globally rate limited, running setTimeout.`);
        rest.processGlobalQueue(rest);
      }, 1000);
      // BREAK WHILE LOOP
      break;
    }
    // Freeze the queue when the invalid-request counter approaches Discord's ban threshold.
    // NOTE(review): the strict `===` only matches the exact boundary value — if the counter
    // ever skips past it, the freeze never triggers; confirm whether `>=` was intended.
    if (rest.invalidRequests === rest.maxInvalidRequests - rest.invalidRequestsSafetyAmount) {
      setTimeout(() => {
        const time = rest.invalidRequestsInterval - (Date.now() - rest.invalidRequestFrozenAt);
        rest.debug(
          `[REST - processGlobalQueue] Freeze global queue because of invalid requests. Time Remaining: ${
            time / 1000
          } seconds.`,
        );
        rest.processGlobalQueue(rest);
      }, 1000);
      // BREAK WHILE LOOP
      break;
    }
    const request = rest.globalQueue.shift();
    // REMOVES ANY POTENTIAL INVALID CONFLICTS
    if (!request) continue;
    // CHECK RATE LIMITS FOR 429 REPEATS
    // IF THIS URL IS STILL RATE LIMITED, TRY AGAIN
    const urlResetIn = rest.checkRateLimits(rest, request.basicURL);
    // IF A BUCKET EXISTS, CHECK THE BUCKET'S RATE LIMITS
    const bucketResetIn = request.payload.bucketId ? rest.checkRateLimits(rest, request.payload.bucketId) : false;
    if (urlResetIn || bucketResetIn) {
      // ONLY ADD TIMEOUT IF ANOTHER QUEUE IS NOT PENDING
      setTimeout(() => {
        rest.debug(`[REST - processGlobalQueue] rate limited, running setTimeout.`);
        // THIS REST IS RATE LIMITED, SO PUSH BACK TO START
        rest.globalQueue.unshift(request);
        // START QUEUE IF NOT STARTED
        rest.processGlobalQueue(rest);
      }, urlResetIn || (bucketResetIn as number));
      continue;
    }
    // Execute the request; success/failure is delivered through the respond/reject callbacks.
    await rest.sendRequest(rest, {
      url: request.urlToUse,
      method: request.request.method,
      bucketId: request.payload.bucketId,
      reject: request.request.reject,
      respond: request.request.respond,
      retryCount: request.payload.retryCount ?? 0,
      payload: rest.createRequestBody(rest, {
        method: request.request.method,
        body: request.payload.body,
      }),
    })
      // Should be handled in sendRequest, this catch just prevents bots from dying
      .catch(() => null);
  }
  // ALLOW OTHER QUEUES TO START WHEN NEW REQUEST IS MADE
  rest.globalQueueProcessing = false;
}

57
vendor/rest/processQueue.ts vendored Normal file
View File

@ -0,0 +1,57 @@
import { RestManager } from "./restManager.ts";
/**
 * Processes the queue by looping over each path separately until the queues are empty.
 *
 * BUGFIX: the bucket-rate-limited case previously `continue`d without yielding,
 * re-checking the same head request in a tight synchronous loop and blocking the
 * event loop until the bucket reset. It now schedules a timed retry and breaks,
 * exactly like the url-rate-limited case.
 */
export function processQueue(rest: RestManager, id: string) {
  const queue = rest.pathQueues.get(id);
  if (!queue) return;

  while (queue.requests.length) {
    rest.debug(`[REST - processQueue] Running while loop.`);
    // SELECT THE FIRST ITEM FROM THIS QUEUE
    const queuedRequest = queue.requests[0];
    // IF THIS DOESN'T HAVE ANY ITEMS JUST CANCEL, THE CLEANER WILL REMOVE IT.
    if (!queuedRequest) break;

    const basicURL = rest.simplifyUrl(queuedRequest.request.url, queuedRequest.request.method);

    // IF THIS URL IS STILL RATE LIMITED, TRY AGAIN
    const urlResetIn = rest.checkRateLimits(rest, basicURL);
    // IF A BUCKET EXISTS, CHECK THE BUCKET'S RATE LIMITS
    const bucketResetIn = queuedRequest.payload.bucketId
      ? rest.checkRateLimits(rest, queuedRequest.payload.bucketId)
      : false;

    // Either limit pauses this route's queue; retry once the earliest-known limit lifts.
    const resetIn = urlResetIn || bucketResetIn;
    if (resetIn) {
      // ONLY ADD TIMEOUT IF ANOTHER QUEUE IS NOT PENDING
      if (!queue.isWaiting) {
        queue.isWaiting = true;
        setTimeout(() => {
          queue.isWaiting = false;
          rest.debug(`[REST - processQueue] rate limited, running setTimeout.`);
          rest.processQueue(rest, id);
        }, resetIn as number);
      }
      // BREAK WHILE LOOP
      break;
    }

    // EXECUTE THE REQUEST
    // CUSTOM HANDLER FOR USER TO LOG OR WHATEVER WHENEVER A FETCH IS MADE
    rest.debug(`[REST - Add To Global Queue] ${JSON.stringify(queuedRequest.payload)}`);
    rest.globalQueue.push({
      ...queuedRequest,
      urlToUse: queuedRequest.request.url,
      basicURL,
    });
    rest.processGlobalQueue(rest);

    queue.requests.shift();
  }

  // ONCE QUEUE IS DONE, WE CAN TRY CLEANING UP
  rest.cleanupQueues(rest);
}

29
vendor/rest/processRateLimitedPaths.ts vendored Normal file
View File

@ -0,0 +1,29 @@
import { RestManager } from "./restManager.ts";
/**
 * Self-rescheduling 1-second loop (via a setTimeout tail call) that removes
 * expired rate limit entries so the queues can proceed again.
 */
export function processRateLimitedPaths(rest: RestManager) {
  const now = Date.now();

  for (const [key, value] of rest.rateLimitedPaths.entries()) {
    rest.debug(`[REST - processRateLimitedPaths] Running for of loop. ${value.resetTimestamp - now}`);
    // Still cooling down — keep this entry for a later pass.
    if (value.resetTimestamp > now) continue;
    // The window has passed: lift the limit.
    rest.rateLimitedPaths.delete(key);
    // Clearing the special "global" entry also ends the global rate limit.
    if (key === "global") rest.globallyRateLimited = false;
  }

  if (rest.rateLimitedPaths.size === 0) {
    // Nothing left to watch; stop the polling loop.
    rest.processingRateLimitedPaths = false;
    return;
  }

  rest.processingRateLimitedPaths = true;
  // Re-check in one second.
  setTimeout(() => {
    rest.debug(`[REST - processRateLimitedPaths] Running setTimeout.`);
    rest.processRateLimitedPaths(rest);
  }, 1000);
}

36
vendor/rest/processRequest.ts vendored Normal file
View File

@ -0,0 +1,36 @@
import { RestManager } from "./restManager.ts";
import { BASE_URL } from "../util/constants.ts";
import { RestPayload, RestRequest } from "./rest.ts";
/** Processes a request and assigns it to a queue or creates a queue if none exists for it. */
export function processRequest(rest: RestManager, request: RestRequest, payload: RestPayload) {
  // Keep only the part of the url starting at "api/".
  const route = request.url.substring(request.url.indexOf("api/"));
  const parts = route.split("/");
  // Drop the leading "api" segment.
  parts.shift();
  // Drop the version segment (e.g. "v10") when present.
  if (parts[0]?.startsWith("v")) parts.shift();
  // Rebuild the request url against our base url and configured version.
  request.url = `${BASE_URL}/v${rest.version}/${parts.join("/")}`;
  // Drop the major param. NOTE(review): `parts` is unused past this point.
  parts.shift();

  const url = rest.simplifyUrl(request.url, request.method);
  const existingQueue = rest.pathQueues.get(url);

  if (existingQueue) {
    // A queue for this route already exists — just append.
    existingQueue.requests.push({ request, payload });
    return;
  }

  // First request for this route: create the queue and start processing it.
  rest.pathQueues.set(url, {
    isWaiting: false,
    requests: [{ request, payload }],
  });
  rest.processQueue(rest, url);
}

63
vendor/rest/processRequestHeaders.ts vendored Normal file
View File

@ -0,0 +1,63 @@
import { RestManager } from "./restManager.ts";
/** Processes the rate limit headers and determines if it needs to be rate limited and returns the bucket id if available */
export function processRequestHeaders(rest: RestManager, url: string, headers: Headers) {
  let rateLimited = false;
  // GET ALL NECESSARY HEADERS
  const remaining = headers.get("x-ratelimit-remaining");
  const retryAfter = headers.get("x-ratelimit-reset-after");
  // NOTE(review): when the header is absent, `Number(null)` is 0 and `reset`
  // becomes "now" — harmless, but confirm that is intended.
  const reset = Date.now() + Number(retryAfter) * 1000;
  const global = headers.get("x-ratelimit-global");
  // undefined override null needed for typings
  const bucketId = headers.get("x-ratelimit-bucket") || undefined;
  // IF THERE IS NO REMAINING RATE LIMIT, MARK IT AS RATE LIMITED
  if (remaining === "0") {
    rateLimited = true;
    // SAVE THE URL AS LIMITED, IMPORTANT FOR NEW REQUESTS BY USER WITHOUT BUCKET
    rest.rateLimitedPaths.set(url, {
      url,
      resetTimestamp: reset,
      bucketId,
    });
    // SAVE THE BUCKET AS LIMITED SINCE DIFFERENT URLS MAY SHARE A BUCKET
    if (bucketId) {
      rest.rateLimitedPaths.set(bucketId, {
        url,
        resetTimestamp: reset,
        bucketId,
      });
    }
  }
  // IF THERE IS NO REMAINING GLOBAL LIMIT, MARK IT RATE LIMITED GLOBALLY
  if (global) {
    const retryAfter = headers.get("retry-after");
    const globalReset = Date.now() + Number(retryAfter) * 1000;
    rest.debug(`[REST = Globally Rate Limited] URL: ${url} | Global Rest: ${globalReset}`);
    rest.globallyRateLimited = true;
    rateLimited = true;
    // The special "global" entry is consulted by checkRateLimits for every request.
    rest.rateLimitedPaths.set("global", {
      url: "global",
      resetTimestamp: globalReset,
      bucketId,
    });
    if (bucketId) {
      rest.rateLimitedPaths.set(bucketId, {
        url: "global",
        resetTimestamp: globalReset,
        bucketId,
      });
    }
  }
  // Kick off the rate limit cleanup loop if it is not already running.
  if (rateLimited && !rest.processingRateLimitedPaths) {
    rest.processRateLimitedPaths(rest);
  }
  return rateLimited ? bucketId : undefined;
}

31
vendor/rest/rest.ts vendored Normal file
View File

@ -0,0 +1,31 @@
/** A single outgoing request as handed to the queueing system. */
export interface RestRequest {
  url: string;
  method: RequestMethod;
  /** Called with the (successful) response envelope. */
  respond: (payload: RestRequestResponse) => unknown;
  /** Called when the request ultimately fails. */
  reject: (payload: RestRequestRejection) => unknown;
}
/** Outcome of an executed request. */
export interface RestRequestResponse {
  ok: boolean;
  status: number;
  /** JSON-encoded response body, when one was present. */
  body?: string;
}
/** Failure outcome; `error` holds a human-readable description. */
export interface RestRequestRejection extends RestRequestResponse {
  error: string;
}
/** Bookkeeping that travels with a request through the queues. */
export interface RestPayload {
  /** Rate limit bucket this request belongs to, when known. */
  bucketId?: string;
  body?: Record<string, unknown>;
  /** How often this request has been retried already. */
  retryCount: number;
  headers?: Record<string, string>;
}
/** A currently rate limited url or bucket. */
export interface RestRateLimitedPath {
  url: string;
  /** Unix timestamp (ms) at which the limit lifts. */
  resetTimestamp: number;
  bucketId?: string;
}
export type RequestMethod = "GET" | "POST" | "PUT" | "DELETE" | "PATCH";

102
vendor/rest/restManager.ts vendored Normal file
View File

@ -0,0 +1,102 @@
import { checkRateLimits } from "./checkRateLimits.ts";
import { cleanupQueues } from "./cleanupQueues.ts";
import { createRequestBody } from "./createRequestBody.ts";
import { processGlobalQueue } from "./processGlobalQueue.ts";
import { processQueue } from "./processQueue.ts";
import { processRateLimitedPaths } from "./processRateLimitedPaths.ts";
import { processRequest } from "./processRequest.ts";
import { processRequestHeaders } from "./processRequestHeaders.ts";
import { convertRestError } from "./convertRestError.ts";
import { RestPayload, RestRateLimitedPath, RestRequest } from "./rest.ts";
import { runMethod } from "./runMethod.ts";
import { simplifyUrl } from "./simplifyUrl.ts";
import { baseEndpoints } from "../util/constants.ts";
import { API_VERSION } from "../util/constants.ts";
import { removeTokenPrefix } from "../util/token.ts";
import { sendRequest } from "./sendRequest.ts";
/**
 * Creates a REST manager: a plain object bundling rate limit state, the request
 * queues, and the (overridable) handler functions that implement request processing.
 * Every handler can be swapped via `options`, which makes the manager pluggable.
 */
export function createRestManager(options: CreateRestManagerOptions) {
  const version = options.version || API_VERSION;
  // When a custom (proxy) url is configured, all requests go there instead of Discord.
  if (options.customUrl) {
    baseEndpoints.BASE_URL = `${options.customUrl}/v${version}`;
  }
  return {
    // current invalid amount
    invalidRequests: 0,
    // max invalid requests allowed until ban
    maxInvalidRequests: 10000,
    // 10 minutes
    invalidRequestsInterval: 600000,
    // timer to reset to 0
    invalidRequestsTimeoutId: 0,
    // how safe to be from max
    invalidRequestsSafetyAmount: 1,
    // when first request in this period was made
    invalidRequestFrozenAt: 0,
    // response statuses counted as "invalid requests"
    invalidRequestErrorStatuses: [401, 403, 429],
    version,
    token: removeTokenPrefix(options.token),
    maxRetryCount: options.maxRetryCount || 10,
    // shared secret used to authenticate against a REST proxy, if any
    secretKey: options.secretKey || "discordeno_best_lib_ever",
    customUrl: options.customUrl || "",
    // per-route queues keyed by the simplified url
    pathQueues: new Map<
      string,
      {
        isWaiting: boolean;
        requests: {
          request: RestRequest;
          payload: RestPayload;
        }[];
      }
    >(),
    processingQueue: false,
    processingRateLimitedPaths: false,
    globallyRateLimited: false,
    // requests ready to be executed, across all routes
    globalQueue: [] as {
      request: RestRequest;
      payload: RestPayload;
      basicURL: string;
      urlToUse: string;
    }[],
    globalQueueProcessing: false,
    rateLimitedPaths: new Map<string, RestRateLimitedPath>(),
    debug: options.debug || function (_text: string) {},
    // every handler below may be overridden through `options`
    checkRateLimits: options.checkRateLimits || checkRateLimits,
    cleanupQueues: options.cleanupQueues || cleanupQueues,
    processQueue: options.processQueue || processQueue,
    processRateLimitedPaths: options.processRateLimitedPaths || processRateLimitedPaths,
    processRequestHeaders: options.processRequestHeaders || processRequestHeaders,
    processRequest: options.processRequest || processRequest,
    createRequestBody: options.createRequestBody || createRequestBody,
    runMethod: options.runMethod || runMethod,
    simplifyUrl: options.simplifyUrl || simplifyUrl,
    processGlobalQueue: options.processGlobalQueue || processGlobalQueue,
    convertRestError: options.convertRestError || convertRestError,
    sendRequest: options.sendRequest || sendRequest,
  };
}
/** Options for `createRestManager`; every handler is optional and falls back to the default implementation. */
export interface CreateRestManagerOptions {
  /** Bot token (passed through `removeTokenPrefix`). */
  token: string;
  /** Base url of a REST proxy to route all requests through. */
  customUrl?: string;
  /** How often a request may be retried before giving up. @default 10 */
  maxRetryCount?: number;
  /** Discord API version to use. */
  version?: number;
  /** Shared secret sent as Authorization when talking to a REST proxy. */
  secretKey?: string;
  /** Receives debug log lines. */
  debug?: (text: string) => unknown;
  checkRateLimits?: typeof checkRateLimits;
  cleanupQueues?: typeof cleanupQueues;
  processQueue?: typeof processQueue;
  processRateLimitedPaths?: typeof processRateLimitedPaths;
  processRequestHeaders?: typeof processRequestHeaders;
  processRequest?: typeof processRequest;
  createRequestBody?: typeof createRequestBody;
  runMethod?: typeof runMethod;
  simplifyUrl?: typeof simplifyUrl;
  processGlobalQueue?: typeof processGlobalQueue;
  convertRestError?: typeof convertRestError;
  sendRequest?: typeof sendRequest;
}
export type RestManager = ReturnType<typeof createRestManager>;

78
vendor/rest/runMethod.ts vendored Normal file
View File

@ -0,0 +1,78 @@
import { RestManager } from "./restManager.ts";
import { API_VERSION, BASE_URL, baseEndpoints } from "../util/constants.ts";
import { RequestMethod, RestRequestRejection, RestRequestResponse } from "./rest.ts";
/**
 * Creates and executes a request, resolving with the parsed JSON body
 * (or `undefined` for 204 responses).
 *
 * When a proxy base url is configured, the request is forwarded verbatim to the
 * proxy (authenticated with `rest.secretKey`); otherwise it goes through the
 * local queueing and rate limit machinery.
 *
 * @param route Either an absolute url or a path starting with "/".
 * @throws Error carrying a stack captured at the call site on failure.
 */
export async function runMethod<T = any>(
  rest: RestManager,
  method: RequestMethod,
  route: string,
  body?: unknown,
  options?: {
    retryCount?: number;
    bucketId?: string;
    headers?: Record<string, string>;
  },
): Promise<T> {
  rest.debug(
    `[REST - RequestCreate] Method: ${method} | URL: ${route} | Retry Count: ${
      options?.retryCount ?? 0
    } | Bucket ID: ${options?.bucketId} | Body: ${
      JSON.stringify(
        body,
      )
    }`,
  );
  // Capture the caller's stack now so async failures still point at the call site.
  const errorStack = new Error("Location:");
  // @ts-ignore Breaks deno deploy. Luca said add ts-ignore until it's fixed
  Error.captureStackTrace(errorStack);
  // For proxies we don't need to do any of the legwork so we just forward the request
  if (!baseEndpoints.BASE_URL.startsWith(BASE_URL) && route[0] === "/") {
    const result = await fetch(`${baseEndpoints.BASE_URL}${route}`, {
      body: body ? JSON.stringify(body) : undefined,
      headers: {
        Authorization: rest.secretKey,
        "Content-Type": "application/json",
      },
      method,
    }).catch((error) => {
      errorStack.message = (error as Error)?.message;
      console.error(error);
      throw errorStack;
    });
    if (!result.ok) {
      errorStack.message = result.statusText;
      rest.debug(`[ERROR] ${errorStack.message}`);
      // Closes the response to prevent memory leak
      await result.text();
      throw errorStack;
    }
    return result.status !== 204 ? await result.json() : undefined;
  }
  // No proxy so we need to handle all rate limiting and such
  return new Promise((resolve, reject) => {
    rest.processRequest(
      rest,
      {
        url: route[0] === "/" ? `${BASE_URL}/v${API_VERSION}${route}` : route,
        method,
        reject: (data: RestRequestRejection) => {
          // Enrich the captured stack with rejection details before surfacing it.
          const restError = rest.convertRestError(errorStack, data);
          reject(restError);
        },
        respond: (data: RestRequestResponse) =>
          resolve(data.status !== 204 ? JSON.parse(data.body ?? "{}") : (undefined as unknown as T)),
      },
      {
        bucketId: options?.bucketId,
        body: body as Record<string, unknown> | undefined,
        retryCount: options?.retryCount ?? 0,
        headers: options?.headers,
      },
    );
  });
}

47
vendor/rest/runProxyMethod.ts vendored Normal file
View File

@ -0,0 +1,47 @@
import { RestManager } from "./restManager.ts";
import { RestRequestRejection, RestRequestResponse } from "./rest.ts";
export type ProxyMethodResponse<T> = Omit<RestRequestResponse | RestRequestRejection, "body"> & { body?: T };
// Left out proxy request, because it's not needed here
// this file could also be moved to a plugin.
/**
 * Like `runMethod`, but resolves (and rejects) with the full response envelope
 * (ok/status plus the parsed body) instead of just the body.
 */
export async function runProxyMethod<T = any>(
  rest: RestManager,
  method: "GET" | "POST" | "PUT" | "DELETE" | "PATCH",
  url: string,
  body?: unknown,
  retryCount = 0,
  bucketId?: string,
): Promise<ProxyMethodResponse<T>> {
  rest.debug(
    `[REST - RequestCreate] Method: ${method} | URL: ${url} | Retry Count: ${retryCount} | Bucket ID: ${bucketId} | Body: ${
      JSON.stringify(
        body,
      )
    }`,
  );
  // No proxy so we need to handle all rate limiting and such
  return new Promise((resolve, reject) => {
    rest.processRequest(
      rest,
      {
        url,
        method,
        reject: (data: RestRequestRejection) => {
          // Parse the body (204 responses have none) and pass the rest of the envelope through.
          const { body: b, ...r } = data;
          reject({ body: data.status !== 204 ? JSON.parse(b ?? "{}") : (undefined as unknown as T), ...r });
        },
        respond: (data: RestRequestResponse) => {
          const { body: b, ...r } = data;
          resolve({ body: data.status !== 204 ? JSON.parse(b ?? "{}") : (undefined as unknown as T), ...r });
        },
      },
      {
        bucketId,
        body: body as Record<string, unknown> | undefined,
        retryCount,
      },
    );
  });
}

155
vendor/rest/sendRequest.ts vendored Normal file
View File

@ -0,0 +1,155 @@
import { HTTPResponseCodes } from "../types/shared.ts";
import { BASE_URL } from "../util/constants.ts";
import { RequestMethod } from "./rest.ts";
import { RestManager } from "./restManager.ts";
/** Options consumed by `sendRequest`. */
export interface RestSendRequestOptions {
  url: string;
  method: RequestMethod;
  /** Rate limit bucket id; filled in from response headers once known. */
  bucketId?: string;
  // NOTE(review): `Function` is a very loose type — a precise callback signature would be safer.
  reject?: Function;
  respond?: Function;
  retryCount?: number;
  /** Prepared headers/body — presumably produced by `createRequestBody`; confirm against callers. */
  payload?: {
    headers: Record<string, string>;
    body: string | FormData;
  };
}
/**
 * Performs the actual fetch for a queued request, updates rate limit state from
 * the response headers, and reports the outcome via `options.respond` / `options.reject`.
 *
 * BUGFIX: the non-429 error path previously awaited `response.json()` twice on the
 * same Response; a fetch body can only be consumed once, so the second read threw
 * a "body already consumed" TypeError instead of the intended error. The body is
 * now read exactly once into a local.
 *
 * @throws Error wrapping the underlying failure (with `cause`) — note that the
 *         internal `throw` for HTTP errors is itself caught by this function's
 *         catch block and re-wrapped.
 */
export async function sendRequest<T>(rest: RestManager, options: RestSendRequestOptions): Promise<T> {
  try {
    // CUSTOM HANDLER FOR USER TO LOG OR WHATEVER WHENEVER A FETCH IS MADE
    rest.debug(`[REST - fetching] URL: ${options.url} | ${JSON.stringify(options)}`);
    const response = await fetch(
      options.url.startsWith(BASE_URL) ? options.url : `${BASE_URL}/v${rest.version}/${options.url}`,
      {
        method: options.method,
        headers: options.payload?.headers,
        body: options.payload?.body,
      },
    );
    rest.debug(`[REST - fetched] URL: ${options.url} | ${JSON.stringify(options)}`);
    // Feed the rate limit headers into the bookkeeping; this may reveal the bucket id.
    const bucketIdFromHeaders = rest.processRequestHeaders(
      rest,
      rest.simplifyUrl(options.url, options.method),
      response.headers,
    );
    // SET THE BUCKET Id IF IT WAS PRESENT
    if (bucketIdFromHeaders) {
      options.bucketId = bucketIdFromHeaders;
    }
    if (response.status < 200 || response.status >= 400) {
      rest.debug(
        `[REST - httpError] Payload: ${JSON.stringify(options)} | Response: ${JSON.stringify(response)}`,
      );
      // Map well-known statuses to a human readable message.
      let error = "REQUEST_UNKNOWN_ERROR";
      switch (response.status) {
        case HTTPResponseCodes.BadRequest:
          error = "The options was improperly formatted, or the server couldn't understand it.";
          break;
        case HTTPResponseCodes.Unauthorized:
          error = "The Authorization header was missing or invalid.";
          break;
        case HTTPResponseCodes.Forbidden:
          error = "The Authorization token you passed did not have permission to the resource.";
          break;
        case HTTPResponseCodes.NotFound:
          error = "The resource at the location specified doesn't exist.";
          break;
        case HTTPResponseCodes.MethodNotAllowed:
          error = "The HTTP method used is not valid for the location specified.";
          break;
        case HTTPResponseCodes.GatewayUnavailable:
          error = "There was not a gateway available to process your options. Wait a bit and retry.";
          break;
      }
      // Count statuses Discord treats as "invalid requests", except scoped 429s
      // (which are not the bot's fault), towards the safety counter.
      if (
        rest.invalidRequestErrorStatuses.includes(response.status) &&
        !(response.status === 429 && response.headers.get("X-RateLimit-Scope"))
      ) {
        // INCREMENT CURRENT INVALID REQUESTS
        ++rest.invalidRequests;
        if (!rest.invalidRequestsTimeoutId) {
          // Schedule the counter reset once per interval window.
          rest.invalidRequestsTimeoutId = setTimeout(() => {
            rest.debug(`[REST - processGlobalQueue] Resetting invalid optionss counter in setTimeout.`);
            rest.invalidRequests = 0;
            rest.invalidRequestsTimeoutId = 0;
          }, rest.invalidRequestsInterval);
        }
      }
      // If NOT rate limited remove from queue
      if (response.status !== 429) {
        // Read the error body exactly once — a second `response.json()` would throw.
        // NOTE(review): `response.type` ("basic", "cors", ...) is always a non-empty
        // string, so this condition is effectively always true — confirm intent.
        const errorBody = response.type ? JSON.stringify(await response.json()) : undefined;
        options.reject?.({
          ok: false,
          status: response.status,
          error,
          body: errorBody,
        });
        throw new Error(
          JSON.stringify({
            ok: false,
            status: response.status,
            error,
            body: errorBody,
          }),
        );
      } else {
        if (options.retryCount && options.retryCount++ >= rest.maxRetryCount) {
          rest.debug(`[REST - RetriesMaxed] ${JSON.stringify(options)}`);
          // REMOVE ITEM FROM QUEUE TO PREVENT RETRY
          options.reject?.({
            ok: false,
            status: response.status,
            error: "The options was rate limited and it maxed out the retries limit.",
          });
          // @ts-ignore Code should never reach here
          return;
        }
        // NOTE(review): a 429 below the retry limit falls through to the success
        // handling below and "responds" with the rate limit body — confirm whether
        // a re-queue was intended here instead.
      }
    }
    // SOMETIMES DISCORD RETURNS AN EMPTY 204 RESPONSE THAT CAN'T BE MADE TO JSON
    if (response.status === 204) {
      rest.debug(`[REST - FetchSuccess] URL: ${options.url} | ${JSON.stringify(options)}`);
      options.respond?.({
        ok: true,
        status: 204,
      });
      // @ts-ignore 204 will be void
      return;
    } else {
      // CONVERT THE RESPONSE TO JSON
      const json = JSON.stringify(await response.json());
      rest.debug(`[REST - fetchSuccess] ${JSON.stringify(options)}`);
      options.respond?.({
        ok: true,
        status: 200,
        body: json,
      });
      return JSON.parse(json);
    }
  } catch (error) {
    // SOMETHING WENT WRONG, LOG AND RESPOND WITH ERROR
    rest.debug(`[REST - fetchFailed] Payload: ${JSON.stringify(options)} | Error: ${error}`);
    options.reject?.({
      ok: false,
      status: 599,
      error: "Internal Proxy Error",
    });
    throw new Error("Something went wrong in sendRequest", {
      cause: error,
    });
  }
}

25
vendor/rest/simplifyUrl.ts vendored Normal file
View File

@ -0,0 +1,25 @@
/**
* Credits: github.com/abalabahaha/eris lib/rest/RequestHandler.js#L397
* Modified for our use-case
*/
/** Split a url to separate rate limit buckets based on major/minor parameters. */
/** Split a url to separate rate limit buckets based on major/minor parameters. */
export function simplifyUrl(url: string, method: string) {
  // Mask every snowflake id except those belonging to the "major" parameters
  // (channels/guilds), which define their own rate limit buckets.
  let route = url
    .replace(/\/([a-z-]+)\/(?:[0-9]{17,19})/g, (full, resource) =>
      resource === "channels" || resource === "guilds" ? full : `/${resource}/skillzPrefersID`
    )
    .replace(/\/reactions\/[^/]+/g, "/reactions/skillzPrefersID");

  // All /reactions endpoints (including /reactions/emoji/@me) share one bucket,
  // so cut the route off right after the /reactions segment.
  const reactionsAt = route.indexOf("/reactions");
  if (reactionsAt !== -1) {
    route = route.slice(0, reactionsAt + "/reactions".length);
  }

  // The Delete Message endpoint has its own rate limit, so key it by method too.
  if (method === "DELETE" && route.endsWith("/messages/skillzPrefersID")) {
    route = method + route;
  }

  return route;
}

2426
vendor/types/discord.ts vendored Normal file

File diff suppressed because it is too large Load Diff

2
vendor/types/mod.ts vendored Normal file
View File

@ -0,0 +1,2 @@
export * from "./discord.ts";
export * from "./shared.ts";

1332
vendor/types/shared.ts vendored Normal file

File diff suppressed because it is too large Load Diff

175
vendor/util/bucket.ts vendored Normal file
View File

@ -0,0 +1,175 @@
import { PickPartial } from "../types/shared.ts";
import { delay } from "./delay.ts";
/** A Leaky Bucket.
 * Useful for rate limiting purposes.
 * This uses `performance.now()` instead of `Date.now()` for higher accuracy.
 *
 * NOTE: This bucket is lazy, meaning its state only updates when one of its methods is called.
 */
export interface LeakyBucket {
  // ----------
  // PROPERTIES
  // ----------
  /** How many tokens this bucket can hold. */
  max: number;
  /** Amount of tokens gained per interval.
   * If bigger than `max` it will be clamped down to `max`.
   */
  refillAmount: number;
  /** Interval, in milliseconds, at which the bucket gains tokens. */
  refillInterval: number;
  // ----------
  // METHODS
  // ----------
  /** Acquire tokens from the bucket.
   * Resolves when the tokens are acquired and available.
   * @param highPriority Whether this acquire should be served as soon as possible, i.e. placed at the front of the waiting queue. Defaults to false.
   */
  acquire(amount: number, highPriority?: boolean): Promise<void>;
  /** Returns the number of milliseconds until the next refill. */
  nextRefill(): number;
  /** Current tokens in the bucket. */
  tokens(): number;
  // ----------
  // INTERNAL STATES
  // ----------
  /** @private Internal track of when the last refill of tokens was (a `performance.now()` timestamp).
   * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
   */
  lastRefill: number;
  /** @private Internal state of whether currently it is allowed to acquire tokens.
   * Used as a lock so two concurrent `acquire` calls cannot race.
   * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
   */
  allowAcquire: boolean;
  /** @private Internal number of currently available tokens.
   * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
   */
  tokensState: number;
  /** @private Internal array of promise resolvers necessary to guarantee no race conditions.
   * Each entry resumes one queued `acquire` call.
   * DO NOT TOUCH THIS! Unless you know what you are doing ofc :P
   */
  waiting: ((_?: unknown) => void)[];
}
/** Build a lazy leaky bucket.
 * `refillAmount` is clamped to `max`; `tokens` defaults to a full bucket.
 */
export function createLeakyBucket(
  { max, refillInterval, refillAmount, tokens, waiting, ...rest }:
    & Omit<
      PickPartial<
        LeakyBucket,
        "max" | "refillInterval" | "refillAmount"
      >,
      "tokens"
    >
    & {
      /** Current tokens in the bucket.
       * @default max
       */
      tokens?: number;
    },
): LeakyBucket {
  return {
    max,
    refillInterval,
    // Refilling more than the bucket can hold is meaningless; clamp it.
    refillAmount: Math.min(refillAmount, max),
    lastRefill: performance.now(),
    allowAcquire: true,
    tokensState: tokens ?? max,
    waiting: waiting ?? [],
    nextRefill() {
      return nextRefill(this);
    },
    tokens() {
      return updateTokens(this);
    },
    async acquire(amount, highPriority) {
      return await acquire(this, amount, highPriority);
    },
    ...rest,
  };
}
/** Lazily refill the bucket based on the time elapsed since the last refill.
 * @returns The amount of currently available tokens.
 */
function updateTokens(bucket: LeakyBucket): number {
  const elapsed = performance.now() - bucket.lastRefill;
  const missedRefills = Math.floor(elapsed / bucket.refillInterval);
  // Grant the missed refills, but never exceed the bucket's capacity.
  bucket.tokensState = Math.min(bucket.max, bucket.tokensState + bucket.refillAmount * missedRefills);
  // Advance the refill clock only by whole intervals so partial progress carries over.
  bucket.lastRefill += missedRefills * bucket.refillInterval;
  return bucket.tokensState;
}
/** Returns the number of milliseconds until the next refill. */
function nextRefill(bucket: LeakyBucket): number {
  // Since this bucket is lazy, update the tokens first; this also normalizes
  // lastRefill so that (now - lastRefill) is strictly less than refillInterval.
  updateTokens(bucket);
  // The next refill happens at lastRefill + refillInterval, so the remaining
  // wait is the interval minus the time already elapsed.
  // BUG FIX: the previous code returned elapsed + interval, which overestimates
  // the wait by up to one full interval and never returns a value below it.
  return bucket.refillInterval - (performance.now() - bucket.lastRefill);
}
/** Acquire `amount` tokens from `bucket`, sleeping (via `delay`) until enough
 * tokens have been refilled. Only one acquire runs at a time; concurrent
 * callers park a resolver in `bucket.waiting` and are resumed one by one.
 * @param highPriority When true, this caller is queued at the front of the waiting list.
 */
async function acquire(bucket: LeakyBucket, amount: number, highPriority = false): Promise<void> {
  // To prevent the race condition of 2 acquires happening at once,
  // check whether its currently allowed to acquire.
  if (!bucket.allowAcquire) {
    // create, push, and wait until the current running acquiring is finished.
    await new Promise((resolve) => {
      if (highPriority) {
        bucket.waiting.unshift(resolve);
      } else {
        bucket.waiting.push(resolve);
      }
    });
    // Somehow another acquire has started,
    // so need to wait again.
    if (!bucket.allowAcquire) {
      // NOTE(review): this recursive retry drops `highPriority`, so a
      // high-priority caller re-queues at the BACK on contention — confirm intended.
      return await acquire(bucket, amount);
    }
  }
  // Take the lock; released at the bottom before waking the next waiter.
  bucket.allowAcquire = false;
  // Since the bucket is lazy update the tokens now,
  // and also get the current amount of available tokens
  let currentTokens = updateTokens(bucket);
  // It's possible that more than available tokens have been acquired,
  // so calculate the amount of milliseconds to wait until this acquire is good to go.
  if (currentTokens < amount) {
    const tokensNeeded = amount - currentTokens;
    let refillsNeeded = Math.ceil(tokensNeeded / bucket.refillAmount);
    const waitTime = bucket.refillInterval * refillsNeeded;
    await delay(waitTime);
    // Update the tokens again to ensure nothing has been missed.
    updateTokens(bucket);
  }
  // In order to not subtract too much from the tokens,
  // calculate what is actually needed to subtract.
  // NOTE(review): this subtracts amount % refillAmount when nonzero, otherwise
  // the full amount — presumably because whole-refill multiples were consumed
  // by the wait above; verify this matches the intended accounting.
  const toSubtract = (amount % bucket.refillAmount) || amount;
  bucket.tokensState -= toSubtract;
  // Allow the next acquire to happen.
  bucket.allowAcquire = true;
  // If there is an acquire waiting, let it continue.
  bucket.waiting.shift()?.();
}

100
vendor/util/collection.ts vendored Normal file
View File

@ -0,0 +1,100 @@
/** A Map with array-like helpers and an optional maximum size. */
export class Collection<K, V> extends Map<K, V> {
  /** When defined, `set()` silently refuses new entries once `size` reaches this cap. */
  maxSize: number | undefined;

  constructor(entries?: (readonly (readonly [K, V])[] | null) | Map<K, V>, options?: CollectionOptions<K, V>) {
    super(entries ?? []);
    this.maxSize = options?.maxSize;
  }

  /** Insert an entry unless the collection is already at `maxSize`. */
  set(key: K, value: V) {
    const atCapacity = this.maxSize !== undefined && this.size >= this.maxSize;
    return atCapacity ? this : super.set(key, value);
  }

  /** Insert an entry, bypassing the `maxSize` cap. */
  forceSet(key: K, value: V) {
    return super.set(key, value);
  }

  /** All values as an array, in insertion order. */
  array() {
    return [...this.values()];
  }

  /** Retrieve the value of the first element in this collection */
  first(): V | undefined {
    return this.values().next().value;
  }

  /** The most recently inserted value, or undefined when empty. */
  last(): V | undefined {
    return this.array()[this.size - 1];
  }

  /** A uniformly random value, or undefined when empty. */
  random(): V | undefined {
    const values = this.array();
    return values[Math.floor(Math.random() * values.length)];
  }

  /** First value satisfying the callback, or undefined when none match. */
  find(callback: (value: V, key: K) => boolean) {
    for (const [key, value] of this.entries()) {
      if (callback(value, key)) return value;
    }
    return;
  }

  /** A new Collection containing only the entries satisfying the callback. */
  filter(callback: (value: V, key: K) => boolean) {
    const matched = new Collection<K, V>();
    for (const [key, value] of this.entries()) {
      if (callback(value, key)) matched.set(key, value);
    }
    return matched;
  }

  /** Transform every value into an array element, in insertion order. */
  map<T>(callback: (value: V, key: K) => T) {
    const out: T[] = [];
    for (const [key, value] of this.entries()) {
      out.push(callback(value, key));
    }
    return out;
  }

  /** Whether at least one entry satisfies the callback. */
  some(callback: (value: V, key: K) => boolean) {
    for (const [key, value] of this.entries()) {
      if (callback(value, key)) return true;
    }
    return false;
  }

  /** Whether every entry satisfies the callback. */
  every(callback: (value: V, key: K) => boolean) {
    for (const [key, value] of this.entries()) {
      if (!callback(value, key)) return false;
    }
    return true;
  }

  /** Fold the values into a single accumulator, in insertion order. */
  reduce<T>(callback: (accumulator: T, value: V, key: K) => T, initialValue?: T): T {
    let accumulator = initialValue as T;
    for (const [key, value] of this.entries()) {
      accumulator = callback(accumulator, value, key);
    }
    return accumulator;
  }
}

export interface CollectionOptions<K, V> {
  /** Maximum number of entries `set()` will allow before refusing inserts. */
  maxSize?: number;
}

26
vendor/util/constants.ts vendored Normal file
View File

@ -0,0 +1,26 @@
/** https://discord.com/developers/docs/reference#api-reference-base-url */
export const BASE_URL = "https://discord.com/api";
/** https://discord.com/developers/docs/reference#api-versioning-api-versions */
export const API_VERSION = 10;
// TODO: update this version
/** The discordeno release this vendored code is based on.
 * https://github.com/discordeno/discordeno/releases */
export const DISCORDENO_VERSION = "13.0.0-rc45";
/** Sent to Discord on every REST request to identify this library.
 * https://discord.com/developers/docs/reference#user-agent */
export const USER_AGENT = `DiscordBot (https://github.com/discordeno/discordeno, v${DISCORDENO_VERSION})`;
/** https://discord.com/developers/docs/reference#image-formatting-image-base-url */
export const IMAGE_BASE_URL = "https://cdn.discordapp.com";
// This can be modified by big brain bots to route REST/CDN traffic through a proxy.
export const baseEndpoints = {
  BASE_URL: `${BASE_URL}/v${API_VERSION}`,
  CDN_URL: IMAGE_BASE_URL,
};
// 1-32 chars: letters, digits, dash, underscore (plus Devanagari/Thai scripts).
export const SLASH_COMMANDS_NAME_REGEX = /^[-_\p{L}\p{N}\p{sc=Deva}\p{sc=Thai}]{1,32}$/u;
// 1-32 word characters, hyphens, or spaces.
export const CONTEXT_MENU_COMMANDS_NAME_REGEX = /^[\w-\s]{1,32}$/;
// Matches channel mentions such as <#123456789>.
export const CHANNEL_MENTION_REGEX = /<#[0-9]+>/g;
// A bare 17-19 digit snowflake, captured as the named group `id`.
export const DISCORD_SNOWFLAKE_REGEX = /^(?<id>\d{17,19})$/;

8
vendor/util/delay.ts vendored Normal file
View File

@ -0,0 +1,8 @@
/** Pause the execution for a given amount of milliseconds. */
export function delay(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
}

5
vendor/util/mod.ts vendored Normal file
View File

@ -0,0 +1,5 @@
export * from "./bucket.ts";
export * from "./collection.ts";
export * from "./constants.ts";
export * from "./delay.ts";
export * from "./token.ts";

14
vendor/util/token.ts vendored Normal file
View File

@ -0,0 +1,14 @@
/** Removes the "Bot " prefix from a token, if present.
 * @param token The raw token, possibly prefixed with "Bot ".
 * @param type Used only to name the caller (GATEWAY or REST) in the error message.
 * @throws Error when no token is provided.
 */
export function removeTokenPrefix(token?: string, type: "GATEWAY" | "REST" = "REST"): string {
  // A missing token is a configuration error; fail loudly with the caller's context.
  if (!token) throw new Error(`The ${type} was not given a token. Please provide a token and try again.`);
  // Strip the prefix when present; otherwise the token is already bare.
  return token.startsWith("Bot ") ? token.slice("Bot ".length) : token;
}
/** Get the bot id from the bot token.
 * The first dot-separated segment of a bot token is the bot's id, base64-encoded.
 * WARNING: Discord staff has mentioned this may not be stable forever. Use at your own risk. However, note for over 5 years this has never broken.
 */
export function getBotIdFromToken(token: string) {
  // Discord tokens use the url-safe base64 alphabet ('-'/'_') without '=' padding,
  // which strict atob() implementations reject. Normalize to standard base64 and
  // re-pad before decoding; regular padded base64 input decodes unchanged.
  const segment = token.split(".")[0].replace(/-/g, "+").replace(/_/g, "/");
  const padded = segment.padEnd(segment.length + ((4 - (segment.length % 4)) % 4), "=");
  return BigInt(atob(padded));
}