diff --git a/.babel.rc b/.babel.rc
new file mode 100644
index 0000000..79097aa
--- /dev/null
+++ b/.babel.rc
@@ -0,0 +1,3 @@
+{
+ "presets" : ["@babel/preset-env"]
+}
\ No newline at end of file
diff --git a/.babelrc b/.babelrc
new file mode 100644
index 0000000..b085c21
--- /dev/null
+++ b/.babelrc
@@ -0,0 +1,3 @@
+{
+ "presets": ["@babel/preset-env"]
+}
\ No newline at end of file
diff --git a/.codesandbox/node/child_process.js b/.codesandbox/node/child_process.js
new file mode 100644
index 0000000..f531ed5
--- /dev/null
+++ b/.codesandbox/node/child_process.js
@@ -0,0 +1,1021 @@
+"use strict";
+
+const {
+ ArrayIsArray,
+ ArrayPrototypeFilter,
+ ArrayPrototypeIncludes,
+ ArrayPrototypeJoin,
+ ArrayPrototypeLastIndexOf,
+ ArrayPrototypePush,
+ ArrayPrototypePushApply,
+ ArrayPrototypeSlice,
+ ArrayPrototypeSort,
+ ArrayPrototypeSplice,
+ ArrayPrototypeUnshift,
+ ObjectAssign,
+ ObjectDefineProperty,
+ ObjectPrototypeHasOwnProperty,
+ PromiseWithResolvers,
+ RegExpPrototypeExec,
+ SafeSet,
+ StringPrototypeIncludes,
+ StringPrototypeSlice,
+ StringPrototypeToUpperCase,
+ SymbolDispose,
+} = primordials;
+
+const {
+ assignFunctionName,
+ convertToValidSignal,
+ getSystemErrorName,
+ kEmptyObject,
+ promisify,
+} = require("internal/util");
+const { isArrayBufferView } = require("internal/util/types");
+let debug = require("internal/util/debuglog").debuglog(
+ "child_process",
+ (fn) => {
+ debug = fn;
+ }
+);
+const { Buffer } = require("buffer");
+const { Pipe, constants: PipeConstants } = internalBinding("pipe_wrap");
+
+const {
+ AbortError,
+ codes: {
+ ERR_CHILD_PROCESS_IPC_REQUIRED,
+ ERR_CHILD_PROCESS_STDIO_MAXBUFFER,
+ ERR_INVALID_ARG_TYPE,
+ ERR_INVALID_ARG_VALUE,
+ },
+ genericNodeError,
+} = require("internal/errors");
+const { clearTimeout, setTimeout } = require("timers");
+const { getValidatedPath } = require("internal/fs/utils");
+const {
+ validateAbortSignal,
+ validateArray,
+ validateBoolean,
+ validateFunction,
+ validateInteger,
+ validateInt32,
+ validateNumber,
+ validateObject,
+ validateString,
+} = require("internal/validators");
+const child_process = require("internal/child_process");
+const { getValidStdio, setupChannel, ChildProcess, stdioStringToArray } =
+ child_process;
+
+const MAX_BUFFER = 1024 * 1024;
+
+const permission = require("internal/process/permission");
+
+const isZOS = process.platform === "os390";
+let addAbortListener;
+
+/**
+ * Spawns a new Node.js process + fork.
+ * @param {string|URL} modulePath
+ * @param {string[]} [args]
+ * @param {{
+ * cwd?: string | URL;
+ * detached?: boolean;
+ * env?: Record;
+ * execPath?: string;
+ * execArgv?: string[];
+ * gid?: number;
+ * serialization?: string;
+ * signal?: AbortSignal;
+ * killSignal?: string | number;
+ * silent?: boolean;
+ * stdio?: Array | string;
+ * uid?: number;
+ * windowsVerbatimArguments?: boolean;
+ * timeout?: number;
+ * }} [options]
+ * @returns {ChildProcess}
+ */
+function fork(modulePath, args = [], options) {
+ modulePath = getValidatedPath(modulePath, "modulePath");
+
+ // Get options and args arguments.
+ let execArgv;
+
+ if (args == null) {
+ args = [];
+ } else if (typeof args === "object" && !ArrayIsArray(args)) {
+ options = args;
+ args = [];
+ } else {
+ validateArray(args, "args");
+ }
+
+ if (options != null) {
+ validateObject(options, "options");
+ }
+ options = { __proto__: null, ...options, shell: false };
+ options.execPath ||= process.execPath;
+ validateArgumentNullCheck(options.execPath, "options.execPath");
+
+ // Prepare arguments for fork:
+ execArgv = options.execArgv || process.execArgv;
+ validateArgumentsNullCheck(execArgv, "options.execArgv");
+
+ if (execArgv === process.execArgv && process._eval != null) {
+ const index = ArrayPrototypeLastIndexOf(execArgv, process._eval);
+ if (index > 0) {
+ // Remove the -e switch to avoid fork bombing ourselves.
+ execArgv = ArrayPrototypeSlice(execArgv);
+ ArrayPrototypeSplice(execArgv, index - 1, 2);
+ }
+ }
+
+ args = [...execArgv, modulePath, ...args];
+
+ if (typeof options.stdio === "string") {
+ options.stdio = stdioStringToArray(options.stdio, "ipc");
+ } else if (!ArrayIsArray(options.stdio)) {
+ // Use a separate fd=3 for the IPC channel. Inherit stdin, stdout,
+ // and stderr from the parent if silent isn't set.
+ options.stdio = stdioStringToArray(
+ options.silent ? "pipe" : "inherit",
+ "ipc"
+ );
+ } else if (!ArrayPrototypeIncludes(options.stdio, "ipc")) {
+ throw new ERR_CHILD_PROCESS_IPC_REQUIRED("options.stdio");
+ }
+
+ return spawn(options.execPath, args, options);
+}
+
+function _forkChild(fd, serializationMode) {
+ // set process.send()
+ const p = new Pipe(PipeConstants.IPC);
+ p.open(fd);
+ p.unref();
+ const control = setupChannel(process, p, serializationMode);
+ process.on("newListener", function onNewListener(name) {
+ if (name === "message" || name === "disconnect") control.refCounted();
+ });
+ process.on("removeListener", function onRemoveListener(name) {
+ if (name === "message" || name === "disconnect") control.unrefCounted();
+ });
+}
+
+function normalizeExecArgs(command, options, callback) {
+ validateString(command, "command");
+ validateArgumentNullCheck(command, "command");
+
+ if (typeof options === "function") {
+ callback = options;
+ options = undefined;
+ }
+
+ // Make a shallow copy so we don't clobber the user's options object.
+ options = { __proto__: null, ...options };
+ options.shell = typeof options.shell === "string" ? options.shell : true;
+
+ return {
+ file: command,
+ options: options,
+ callback: callback,
+ };
+}
+
+/**
+ * Spawns a shell executing the given command.
+ * @param {string} command
+ * @param {{
+ * cmd?: string;
+ * env?: Record;
+ * encoding?: string;
+ * shell?: string;
+ * signal?: AbortSignal;
+ * timeout?: number;
+ * maxBuffer?: number;
+ * killSignal?: string | number;
+ * uid?: number;
+ * gid?: number;
+ * windowsHide?: boolean;
+ * }} [options]
+ * @param {(
+ * error?: Error,
+ * stdout?: string | Buffer,
+ * stderr?: string | Buffer
+ * ) => any} [callback]
+ * @returns {ChildProcess}
+ */
+function exec(command, options, callback) {
+ const opts = normalizeExecArgs(command, options, callback);
+ return module.exports.execFile(opts.file, opts.options, opts.callback);
+}
+
+const customPromiseExecFunction = (orig) => {
+ return assignFunctionName(orig.name, function (...args) {
+ const { promise, resolve, reject } = PromiseWithResolvers();
+
+ promise.child = orig(...args, (err, stdout, stderr) => {
+ if (err !== null) {
+ err.stdout = stdout;
+ err.stderr = stderr;
+ reject(err);
+ } else {
+ resolve({ stdout, stderr });
+ }
+ });
+
+ return promise;
+ });
+};
+
+ObjectDefineProperty(exec, promisify.custom, {
+ __proto__: null,
+ enumerable: false,
+ value: customPromiseExecFunction(exec),
+});
+
+function normalizeExecFileArgs(file, args, options, callback) {
+ if (ArrayIsArray(args)) {
+ args = ArrayPrototypeSlice(args);
+ } else if (args != null && typeof args === "object") {
+ callback = options;
+ options = args;
+ args = null;
+ } else if (typeof args === "function") {
+ callback = args;
+ options = null;
+ args = null;
+ }
+
+ args ??= [];
+
+ if (typeof options === "function") {
+ callback = options;
+ } else if (options != null) {
+ validateObject(options, "options");
+ }
+
+ options ??= kEmptyObject;
+
+ if (callback != null) {
+ validateFunction(callback, "callback");
+ }
+
+ // Validate argv0, if present.
+ if (options.argv0 != null) {
+ validateString(options.argv0, "options.argv0");
+ validateArgumentNullCheck(options.argv0, "options.argv0");
+ }
+
+ return { file, args, options, callback };
+}
+
+/**
+ * Spawns the specified file as a shell.
+ * @param {string} file
+ * @param {string[]} [args]
+ * @param {{
+ * cwd?: string | URL;
+ * env?: Record;
+ * encoding?: string;
+ * timeout?: number;
+ * maxBuffer?: number;
+ * killSignal?: string | number;
+ * uid?: number;
+ * gid?: number;
+ * windowsHide?: boolean;
+ * windowsVerbatimArguments?: boolean;
+ * shell?: boolean | string;
+ * signal?: AbortSignal;
+ * }} [options]
+ * @param {(
+ * error?: Error,
+ * stdout?: string | Buffer,
+ * stderr?: string | Buffer
+ * ) => any} [callback]
+ * @returns {ChildProcess}
+ */
+function execFile(file, args, options, callback) {
+ ({ file, args, options, callback } = normalizeExecFileArgs(
+ file,
+ args,
+ options,
+ callback
+ ));
+
+ options = {
+ __proto__: null,
+ encoding: "utf8",
+ timeout: 0,
+ maxBuffer: MAX_BUFFER,
+ killSignal: "SIGTERM",
+ cwd: null,
+ env: null,
+ shell: false,
+ ...options,
+ };
+
+ // Validate the timeout, if present.
+ validateTimeout(options.timeout);
+
+ // Validate maxBuffer, if present.
+ validateMaxBuffer(options.maxBuffer);
+
+ options.killSignal = sanitizeKillSignal(options.killSignal);
+
+ const child = spawn(file, args, {
+ cwd: options.cwd,
+ env: options.env,
+ gid: options.gid,
+ shell: options.shell,
+ signal: options.signal,
+ uid: options.uid,
+ windowsHide: !!options.windowsHide,
+ windowsVerbatimArguments: !!options.windowsVerbatimArguments,
+ });
+
+ let encoding;
+ const _stdout = [];
+ const _stderr = [];
+ if (options.encoding !== "buffer" && Buffer.isEncoding(options.encoding)) {
+ encoding = options.encoding;
+ } else {
+ encoding = null;
+ }
+ let stdoutLen = 0;
+ let stderrLen = 0;
+ let killed = false;
+ let exited = false;
+ let timeoutId;
+
+ let ex = null;
+
+ let cmd = file;
+
+ function exithandler(code, signal) {
+ if (exited) return;
+ exited = true;
+
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ timeoutId = null;
+ }
+
+ if (!callback) return;
+
+ // merge chunks
+ let stdout;
+ let stderr;
+ if (encoding || child.stdout?.readableEncoding) {
+ stdout = ArrayPrototypeJoin(_stdout, "");
+ } else {
+ stdout = Buffer.concat(_stdout);
+ }
+ if (encoding || child.stderr?.readableEncoding) {
+ stderr = ArrayPrototypeJoin(_stderr, "");
+ } else {
+ stderr = Buffer.concat(_stderr);
+ }
+
+ if (!ex && code === 0 && signal === null) {
+ callback(null, stdout, stderr);
+ return;
+ }
+
+ if (args?.length) cmd += ` ${ArrayPrototypeJoin(args, " ")}`;
+
+ ex ||= genericNodeError(`Command failed: ${cmd}\n${stderr}`, {
+ code: code < 0 ? getSystemErrorName(code) : code,
+ killed: child.killed || killed,
+ signal: signal,
+ });
+
+ ex.cmd = cmd;
+ callback(ex, stdout, stderr);
+ }
+
+ function errorhandler(e) {
+ ex = e;
+
+ if (child.stdout) child.stdout.destroy();
+
+ if (child.stderr) child.stderr.destroy();
+
+ exithandler();
+ }
+
+ function kill() {
+ if (child.stdout) child.stdout.destroy();
+
+ if (child.stderr) child.stderr.destroy();
+
+ killed = true;
+ try {
+ child.kill(options.killSignal);
+ } catch (e) {
+ ex = e;
+ exithandler();
+ }
+ }
+
+ if (options.timeout > 0) {
+ timeoutId = setTimeout(function delayedKill() {
+ kill();
+ timeoutId = null;
+ }, options.timeout);
+ }
+
+ if (child.stdout) {
+ if (encoding) child.stdout.setEncoding(encoding);
+
+ child.stdout.on("data", function onChildStdout(chunk) {
+ // Do not need to count the length
+ if (options.maxBuffer === Infinity) {
+ ArrayPrototypePush(_stdout, chunk);
+ return;
+ }
+ const encoding = child.stdout.readableEncoding;
+ const length = encoding
+ ? Buffer.byteLength(chunk, encoding)
+ : chunk.length;
+ const slice = encoding
+ ? StringPrototypeSlice
+ : (buf, ...args) => buf.slice(...args);
+ stdoutLen += length;
+
+ if (stdoutLen > options.maxBuffer) {
+ const truncatedLen = options.maxBuffer - (stdoutLen - length);
+ ArrayPrototypePush(_stdout, slice(chunk, 0, truncatedLen));
+
+ ex = new ERR_CHILD_PROCESS_STDIO_MAXBUFFER("stdout");
+ kill();
+ } else {
+ ArrayPrototypePush(_stdout, chunk);
+ }
+ });
+ }
+
+ if (child.stderr) {
+ if (encoding) child.stderr.setEncoding(encoding);
+
+ child.stderr.on("data", function onChildStderr(chunk) {
+ // Do not need to count the length
+ if (options.maxBuffer === Infinity) {
+ ArrayPrototypePush(_stderr, chunk);
+ return;
+ }
+ const encoding = child.stderr.readableEncoding;
+ const length = encoding
+ ? Buffer.byteLength(chunk, encoding)
+ : chunk.length;
+ stderrLen += length;
+
+ if (stderrLen > options.maxBuffer) {
+ const truncatedLen = options.maxBuffer - (stderrLen - length);
+ ArrayPrototypePush(_stderr, chunk.slice(0, truncatedLen));
+
+ ex = new ERR_CHILD_PROCESS_STDIO_MAXBUFFER("stderr");
+ kill();
+ } else {
+ ArrayPrototypePush(_stderr, chunk);
+ }
+ });
+ }
+
+ child.addListener("close", exithandler);
+ child.addListener("error", errorhandler);
+
+ return child;
+}
+
+ObjectDefineProperty(execFile, promisify.custom, {
+ __proto__: null,
+ enumerable: false,
+ value: customPromiseExecFunction(execFile),
+});
+
+function copyProcessEnvToEnv(env, name, optionEnv) {
+ if (
+ process.env[name] &&
+ (!optionEnv || !ObjectPrototypeHasOwnProperty(optionEnv, name))
+ ) {
+ env[name] = process.env[name];
+ }
+}
+
+let permissionModelFlagsToCopy;
+
+function getPermissionModelFlagsToCopy() {
+ if (permissionModelFlagsToCopy === undefined) {
+ permissionModelFlagsToCopy = [
+ ...permission.availableFlags(),
+ "--permission",
+ ];
+ }
+ return permissionModelFlagsToCopy;
+}
+
+function copyPermissionModelFlagsToEnv(env, key, args) {
+ // Do not override if permission was already passed to file
+ if (
+ args.includes("--permission") ||
+ (env[key] && env[key].indexOf("--permission") !== -1)
+ ) {
+ return;
+ }
+
+ const flagsToCopy = getPermissionModelFlagsToCopy();
+ for (const arg of process.execArgv) {
+ for (const flag of flagsToCopy) {
+ if (arg.startsWith(flag)) {
+ env[key] = `${env[key] ? env[key] + " " + arg : arg}`;
+ }
+ }
+ }
+}
+
+let emittedDEP0190Already = false;
+function normalizeSpawnArguments(file, args, options) {
+ validateString(file, "file");
+ validateArgumentNullCheck(file, "file");
+
+ if (file.length === 0)
+ throw new ERR_INVALID_ARG_VALUE("file", file, "cannot be empty");
+
+ if (ArrayIsArray(args)) {
+ args = ArrayPrototypeSlice(args);
+ } else if (args == null) {
+ args = [];
+ } else if (typeof args !== "object") {
+ throw new ERR_INVALID_ARG_TYPE("args", "object", args);
+ } else {
+ options = args;
+ args = [];
+ }
+
+ validateArgumentsNullCheck(args, "args");
+
+ if (options === undefined) options = kEmptyObject;
+ else validateObject(options, "options");
+
+ options = { __proto__: null, ...options };
+ let cwd = options.cwd;
+
+ // Validate the cwd, if present.
+ if (cwd != null) {
+ cwd = getValidatedPath(cwd, "options.cwd");
+ }
+
+ // Validate detached, if present.
+ if (options.detached != null) {
+ validateBoolean(options.detached, "options.detached");
+ }
+
+ // Validate the uid, if present.
+ if (options.uid != null) {
+ validateInt32(options.uid, "options.uid");
+ }
+
+ // Validate the gid, if present.
+ if (options.gid != null) {
+ validateInt32(options.gid, "options.gid");
+ }
+
+ // Validate the shell, if present.
+ if (
+ options.shell != null &&
+ typeof options.shell !== "boolean" &&
+ typeof options.shell !== "string"
+ ) {
+ throw new ERR_INVALID_ARG_TYPE(
+ "options.shell",
+ ["boolean", "string"],
+ options.shell
+ );
+ }
+
+ // Validate argv0, if present.
+ if (options.argv0 != null) {
+ validateString(options.argv0, "options.argv0");
+ validateArgumentNullCheck(options.argv0, "options.argv0");
+ }
+
+ // Validate windowsHide, if present.
+ if (options.windowsHide != null) {
+ validateBoolean(options.windowsHide, "options.windowsHide");
+ }
+
+ // Validate windowsVerbatimArguments, if present.
+ let { windowsVerbatimArguments } = options;
+ if (windowsVerbatimArguments != null) {
+ validateBoolean(
+ windowsVerbatimArguments,
+ "options.windowsVerbatimArguments"
+ );
+ }
+
+ if (options.shell) {
+ validateArgumentNullCheck(options.shell, "options.shell");
+ if (args.length > 0 && !emittedDEP0190Already) {
+ process.emitWarning(
+ "Passing args to a child process with shell option true can lead to security " +
+ "vulnerabilities, as the arguments are not escaped, only concatenated.",
+ "DeprecationWarning",
+ "DEP0190"
+ );
+ emittedDEP0190Already = true;
+ }
+
+ const command =
+ args.length > 0 ? `${file} ${ArrayPrototypeJoin(args, " ")}` : file;
+ // Set the shell, switches, and commands.
+ if (process.platform === "win32") {
+ if (typeof options.shell === "string") file = options.shell;
+ else file = process.env.comspec || "cmd.exe";
+ // '/d /s /c' is used only for cmd.exe.
+ if (RegExpPrototypeExec(/^(?:.*\\)?cmd(?:\.exe)?$/i, file) !== null) {
+ args = ["/d", "/s", "/c", `"${command}"`];
+ windowsVerbatimArguments = true;
+ } else {
+ args = ["-c", command];
+ }
+ } else {
+ if (typeof options.shell === "string") file = options.shell;
+ else if (process.platform === "android") file = "/system/bin/sh";
+ else file = "/bin/sh";
+ args = ["-c", command];
+ }
+ }
+
+ if (typeof options.argv0 === "string") {
+ ArrayPrototypeUnshift(args, options.argv0);
+ } else {
+ ArrayPrototypeUnshift(args, file);
+ }
+
+ // Shallow copy to guarantee changes won't impact process.env
+ const env = options.env || { ...process.env };
+ const envPairs = [];
+
+ // process.env.NODE_V8_COVERAGE always propagates, making it possible to
+ // collect coverage for programs that spawn with white-listed environment.
+ copyProcessEnvToEnv(env, "NODE_V8_COVERAGE", options.env);
+
+ if (isZOS) {
+ // The following environment variables must always propagate if set.
+ copyProcessEnvToEnv(env, "_BPXK_AUTOCVT", options.env);
+ copyProcessEnvToEnv(env, "_CEE_RUNOPTS", options.env);
+ copyProcessEnvToEnv(env, "_TAG_REDIR_ERR", options.env);
+ copyProcessEnvToEnv(env, "_TAG_REDIR_IN", options.env);
+ copyProcessEnvToEnv(env, "_TAG_REDIR_OUT", options.env);
+ copyProcessEnvToEnv(env, "STEPLIB", options.env);
+ copyProcessEnvToEnv(env, "LIBPATH", options.env);
+ copyProcessEnvToEnv(env, "_EDC_SIG_DFLT", options.env);
+ copyProcessEnvToEnv(env, "_EDC_SUSV3", options.env);
+ }
+
+ if (permission.isEnabled()) {
+ copyPermissionModelFlagsToEnv(env, "NODE_OPTIONS", args);
+ }
+
+ let envKeys = [];
+ // Prototype values are intentionally included.
+ for (const key in env) {
+ ArrayPrototypePush(envKeys, key);
+ }
+
+ if (process.platform === "win32") {
+ // On Windows env keys are case insensitive. Filter out duplicates,
+ // keeping only the first one (in lexicographic order)
+ const sawKey = new SafeSet();
+ envKeys = ArrayPrototypeFilter(ArrayPrototypeSort(envKeys), (key) => {
+ const uppercaseKey = StringPrototypeToUpperCase(key);
+ if (sawKey.has(uppercaseKey)) {
+ return false;
+ }
+ sawKey.add(uppercaseKey);
+ return true;
+ });
+ }
+
+ for (const key of envKeys) {
+ const value = env[key];
+ if (value !== undefined) {
+ validateArgumentNullCheck(key, `options.env['${key}']`);
+ validateArgumentNullCheck(value, `options.env['${key}']`);
+ ArrayPrototypePush(envPairs, `${key}=${value}`);
+ }
+ }
+
+ return {
+ // Make a shallow copy so we don't clobber the user's options object.
+ __proto__: null,
+ ...options,
+ args,
+ cwd,
+ detached: !!options.detached,
+ envPairs,
+ file,
+ windowsHide: !!options.windowsHide,
+ windowsVerbatimArguments: !!windowsVerbatimArguments,
+ };
+}
+
+function abortChildProcess(child, killSignal, reason) {
+ if (!child) return;
+ try {
+ if (child.kill(killSignal)) {
+ child.emit("error", new AbortError(undefined, { cause: reason }));
+ }
+ } catch (err) {
+ child.emit("error", err);
+ }
+}
+
+/**
+ * Spawns a new process using the given `file`.
+ * @param {string} file
+ * @param {string[]} [args]
+ * @param {{
+ * cwd?: string | URL;
+ * env?: Record;
+ * argv0?: string;
+ * stdio?: Array | string;
+ * detached?: boolean;
+ * uid?: number;
+ * gid?: number;
+ * serialization?: string;
+ * shell?: boolean | string;
+ * windowsVerbatimArguments?: boolean;
+ * windowsHide?: boolean;
+ * signal?: AbortSignal;
+ * timeout?: number;
+ * killSignal?: string | number;
+ * }} [options]
+ * @returns {ChildProcess}
+ */
+function spawn(file, args, options) {
+ options = normalizeSpawnArguments(file, args, options);
+ validateTimeout(options.timeout);
+ validateAbortSignal(options.signal, "options.signal");
+ const killSignal = sanitizeKillSignal(options.killSignal);
+ const child = new ChildProcess();
+
+ debug("spawn", options);
+ child.spawn(options);
+
+ if (options.timeout > 0) {
+ let timeoutId = setTimeout(() => {
+ if (timeoutId) {
+ try {
+ child.kill(killSignal);
+ } catch (err) {
+ child.emit("error", err);
+ }
+ timeoutId = null;
+ }
+ }, options.timeout);
+
+ child.once("exit", () => {
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ timeoutId = null;
+ }
+ });
+ }
+
+ if (options.signal) {
+ const signal = options.signal;
+ if (signal.aborted) {
+ process.nextTick(onAbortListener);
+ } else {
+ addAbortListener ??=
+ require("internal/events/abort_listener").addAbortListener;
+ const disposable = addAbortListener(signal, onAbortListener);
+ child.once("exit", disposable[SymbolDispose]);
+ }
+
+ function onAbortListener() {
+ abortChildProcess(child, killSignal, options.signal.reason);
+ }
+ }
+
+ return child;
+}
+
+/**
+ * Spawns a new process synchronously using the given `file`.
+ * @param {string} file
+ * @param {string[]} [args]
+ * @param {{
+ * cwd?: string | URL;
+ * input?: string | Buffer | TypedArray | DataView;
+ * argv0?: string;
+ * stdio?: string | Array;
+ * env?: Record;
+ * uid?: number;
+ * gid?: number;
+ * timeout?: number;
+ * killSignal?: string | number;
+ * maxBuffer?: number;
+ * encoding?: string;
+ * shell?: boolean | string;
+ * windowsVerbatimArguments?: boolean;
+ * windowsHide?: boolean;
+ * }} [options]
+ * @returns {{
+ * pid: number;
+ * output: Array;
+ * stdout: Buffer | string;
+ * stderr: Buffer | string;
+ * status: number | null;
+ * signal: string | null;
+ * error: Error;
+ * }}
+ */
+function spawnSync(file, args, options) {
+ options = {
+ __proto__: null,
+ maxBuffer: MAX_BUFFER,
+ ...normalizeSpawnArguments(file, args, options),
+ };
+
+ debug("spawnSync", options);
+
+ // Validate the timeout, if present.
+ validateTimeout(options.timeout);
+
+ // Validate maxBuffer, if present.
+ validateMaxBuffer(options.maxBuffer);
+
+ // Validate and translate the kill signal, if present.
+ options.killSignal = sanitizeKillSignal(options.killSignal);
+
+ options.stdio = getValidStdio(options.stdio || "pipe", true).stdio;
+
+ if (options.input) {
+ const stdin = (options.stdio[0] = { ...options.stdio[0] });
+ stdin.input = options.input;
+ }
+
+ // We may want to pass data in on any given fd, ensure it is a valid buffer
+ for (let i = 0; i < options.stdio.length; i++) {
+ const input = options.stdio[i]?.input;
+ if (input != null) {
+ const pipe = (options.stdio[i] = { ...options.stdio[i] });
+ if (isArrayBufferView(input)) {
+ pipe.input = input;
+ } else if (typeof input === "string") {
+ pipe.input = Buffer.from(input, options.encoding);
+ } else {
+ throw new ERR_INVALID_ARG_TYPE(
+ `options.stdio[${i}]`,
+ ["Buffer", "TypedArray", "DataView", "string"],
+ input
+ );
+ }
+ }
+ }
+
+ return child_process.spawnSync(options);
+}
+
+function checkExecSyncError(ret, args, cmd) {
+ let err;
+ if (ret.error) {
+ err = ret.error;
+ ObjectAssign(err, ret);
+ } else if (ret.status !== 0) {
+ let msg = "Command failed: ";
+ msg += cmd || ArrayPrototypeJoin(args, " ");
+ if (ret.stderr && ret.stderr.length > 0)
+ msg += `\n${ret.stderr.toString()}`;
+ err = genericNodeError(msg, ret);
+ }
+ return err;
+}
+
+/**
+ * Spawns a file as a shell synchronously.
+ * @param {string} file
+ * @param {string[]} [args]
+ * @param {{
+ * cwd?: string | URL;
+ * input?: string | Buffer | TypedArray | DataView;
+ * stdio?: string | Array;
+ * env?: Record;
+ * uid?: number;
+ * gid?: number;
+ * timeout?: number;
+ * killSignal?: string | number;
+ * maxBuffer?: number;
+ * encoding?: string;
+ * windowsHide?: boolean;
+ * shell?: boolean | string;
+ * }} [options]
+ * @returns {Buffer | string}
+ */
+function execFileSync(file, args, options) {
+ ({ file, args, options } = normalizeExecFileArgs(file, args, options));
+
+ const inheritStderr = !options.stdio;
+ const ret = spawnSync(file, args, options);
+
+ if (inheritStderr && ret.stderr) process.stderr.write(ret.stderr);
+
+ const errArgs = [options.argv0 || file];
+ ArrayPrototypePushApply(errArgs, args);
+ const err = checkExecSyncError(ret, errArgs);
+
+ if (err) throw err;
+
+ return ret.stdout;
+}
+
+/**
+ * Spawns a shell executing the given `command` synchronously.
+ * @param {string} command
+ * @param {{
+ * cwd?: string | URL;
+ * input?: string | Buffer | TypedArray | DataView;
+ * stdio?: string | Array;
+ * env?: Record;
+ * shell?: string;
+ * uid?: number;
+ * gid?: number;
+ * timeout?: number;
+ * killSignal?: string | number;
+ * maxBuffer?: number;
+ * encoding?: string;
+ * windowsHide?: boolean;
+ * }} [options]
+ * @returns {Buffer | string}
+ */
+function execSync(command, options) {
+ const opts = normalizeExecArgs(command, options, null);
+ const inheritStderr = !opts.options.stdio;
+
+ const ret = spawnSync(opts.file, opts.options);
+
+ if (inheritStderr && ret.stderr) process.stderr.write(ret.stderr);
+
+ const err = checkExecSyncError(ret, undefined, command);
+
+ if (err) throw err;
+
+ return ret.stdout;
+}
+
+function validateArgumentNullCheck(arg, propName) {
+ if (typeof arg === "string" && StringPrototypeIncludes(arg, "\u0000")) {
+ throw new ERR_INVALID_ARG_VALUE(
+ propName,
+ arg,
+ "must be a string without null bytes"
+ );
+ }
+}
+
+function validateArgumentsNullCheck(args, propName) {
+ for (let i = 0; i < args.length; ++i) {
+ validateArgumentNullCheck(args[i], `${propName}[${i}]`);
+ }
+}
+
+function validateTimeout(timeout) {
+ if (timeout != null) {
+ validateInteger(timeout, "timeout", 0);
+ }
+}
+
+function validateMaxBuffer(maxBuffer) {
+ if (maxBuffer != null) {
+ validateNumber(maxBuffer, "options.maxBuffer", 0);
+ }
+}
+
+function sanitizeKillSignal(killSignal) {
+ if (typeof killSignal === "string" || typeof killSignal === "number") {
+ return convertToValidSignal(killSignal);
+ } else if (killSignal != null) {
+ throw new ERR_INVALID_ARG_TYPE(
+ "options.killSignal",
+ ["string", "number"],
+ killSignal
+ );
+ }
+}
+
+module.exports = {
+ _forkChild,
+ ChildProcess,
+ exec,
+ execFile,
+ execFileSync,
+ execSync,
+ fork,
+ spawn,
+ spawnSync,
+};
diff --git a/.codesandbox/node/console.js b/.codesandbox/node/console.js
new file mode 100644
index 0000000..d896d2e
--- /dev/null
+++ b/.codesandbox/node/console.js
@@ -0,0 +1,3 @@
+"use strict";
+
+module.exports = require("internal/console/global");
diff --git a/.codesandbox/node/diagnostics_channel.js b/.codesandbox/node/diagnostics_channel.js
new file mode 100644
index 0000000..1422b0d
--- /dev/null
+++ b/.codesandbox/node/diagnostics_channel.js
@@ -0,0 +1,439 @@
+"use strict";
+
+const {
+ ArrayPrototypeAt,
+ ArrayPrototypeIndexOf,
+ ArrayPrototypePush,
+ ArrayPrototypePushApply,
+ ArrayPrototypeSlice,
+ ArrayPrototypeSplice,
+ ObjectDefineProperty,
+ ObjectGetPrototypeOf,
+ ObjectSetPrototypeOf,
+ Promise,
+ PromisePrototypeThen,
+ PromiseReject,
+ PromiseResolve,
+ ReflectApply,
+ SafeFinalizationRegistry,
+ SafeMap,
+ SymbolHasInstance,
+} = primordials;
+
+const {
+ codes: { ERR_INVALID_ARG_TYPE },
+} = require("internal/errors");
+const { validateFunction } = require("internal/validators");
+
+const { triggerUncaughtException } = internalBinding("errors");
+
+const { WeakReference } = require("internal/util");
+
+// Can't delete when weakref count reaches 0 as it could increment again.
+// Only GC can be used as a valid time to clean up the channels map.
+class WeakRefMap extends SafeMap {
+ #finalizers = new SafeFinalizationRegistry((key) => {
+ // Check that the key doesn't have any value before deleting, as the WeakRef for the key
+ // may have been replaced since finalization callbacks aren't synchronous with GC.
+ if (!this.has(key)) this.delete(key);
+ });
+
+ set(key, value) {
+ this.#finalizers.register(value, key);
+ return super.set(key, new WeakReference(value));
+ }
+
+ get(key) {
+ return super.get(key)?.get();
+ }
+
+ has(key) {
+ return !!this.get(key);
+ }
+
+ incRef(key) {
+ return super.get(key)?.incRef();
+ }
+
+ decRef(key) {
+ return super.get(key)?.decRef();
+ }
+}
+
+function markActive(channel) {
+ // eslint-disable-next-line no-use-before-define
+ ObjectSetPrototypeOf(channel, ActiveChannel.prototype);
+ channel._subscribers = [];
+ channel._stores = new SafeMap();
+}
+
+function maybeMarkInactive(channel) {
+ // When there are no more active subscribers or bound, restore to fast prototype.
+ if (!channel._subscribers.length && !channel._stores.size) {
+ // eslint-disable-next-line no-use-before-define
+ ObjectSetPrototypeOf(channel, Channel.prototype);
+ channel._subscribers = undefined;
+ channel._stores = undefined;
+ }
+}
+
+function defaultTransform(data) {
+ return data;
+}
+
+function wrapStoreRun(store, data, next, transform = defaultTransform) {
+ return () => {
+ let context;
+ try {
+ context = transform(data);
+ } catch (err) {
+ process.nextTick(() => {
+ triggerUncaughtException(err, false);
+ });
+ return next();
+ }
+
+ return store.run(context, next);
+ };
+}
+
+// TODO(qard): should there be a C++ channel interface?
+class ActiveChannel {
+ subscribe(subscription) {
+ validateFunction(subscription, "subscription");
+ this._subscribers = ArrayPrototypeSlice(this._subscribers);
+ ArrayPrototypePush(this._subscribers, subscription);
+ channels.incRef(this.name);
+ }
+
+ unsubscribe(subscription) {
+ const index = ArrayPrototypeIndexOf(this._subscribers, subscription);
+ if (index === -1) return false;
+
+ const before = ArrayPrototypeSlice(this._subscribers, 0, index);
+ const after = ArrayPrototypeSlice(this._subscribers, index + 1);
+ this._subscribers = before;
+ ArrayPrototypePushApply(this._subscribers, after);
+
+ channels.decRef(this.name);
+ maybeMarkInactive(this);
+
+ return true;
+ }
+
+ bindStore(store, transform) {
+ const replacing = this._stores.has(store);
+ if (!replacing) channels.incRef(this.name);
+ this._stores.set(store, transform);
+ }
+
+ unbindStore(store) {
+ if (!this._stores.has(store)) {
+ return false;
+ }
+
+ this._stores.delete(store);
+
+ channels.decRef(this.name);
+ maybeMarkInactive(this);
+
+ return true;
+ }
+
+ get hasSubscribers() {
+ return true;
+ }
+
+ publish(data) {
+ const subscribers = this._subscribers;
+ for (let i = 0; i < (subscribers?.length || 0); i++) {
+ try {
+ const onMessage = subscribers[i];
+ onMessage(data, this.name);
+ } catch (err) {
+ process.nextTick(() => {
+ triggerUncaughtException(err, false);
+ });
+ }
+ }
+ }
+
+ runStores(data, fn, thisArg, ...args) {
+ let run = () => {
+ this.publish(data);
+ return ReflectApply(fn, thisArg, args);
+ };
+
+ for (const entry of this._stores.entries()) {
+ const store = entry[0];
+ const transform = entry[1];
+ run = wrapStoreRun(store, data, run, transform);
+ }
+
+ return run();
+ }
+}
+
+class Channel {
+ constructor(name) {
+ this._subscribers = undefined;
+ this._stores = undefined;
+ this.name = name;
+
+ channels.set(name, this);
+ }
+
+ static [SymbolHasInstance](instance) {
+ const prototype = ObjectGetPrototypeOf(instance);
+ return (
+ prototype === Channel.prototype || prototype === ActiveChannel.prototype
+ );
+ }
+
+ subscribe(subscription) {
+ markActive(this);
+ this.subscribe(subscription);
+ }
+
+ unsubscribe() {
+ return false;
+ }
+
+ bindStore(store, transform) {
+ markActive(this);
+ this.bindStore(store, transform);
+ }
+
+ unbindStore() {
+ return false;
+ }
+
+ get hasSubscribers() {
+ return false;
+ }
+
+ publish() {}
+
+ runStores(data, fn, thisArg, ...args) {
+ return ReflectApply(fn, thisArg, args);
+ }
+}
+
+const channels = new WeakRefMap();
+
+function channel(name) {
+ const channel = channels.get(name);
+ if (channel) return channel;
+
+ if (typeof name !== "string" && typeof name !== "symbol") {
+ throw new ERR_INVALID_ARG_TYPE("channel", ["string", "symbol"], name);
+ }
+
+ return new Channel(name);
+}
+
+// Convenience wrapper: subscribe to the named channel, creating it if
+// necessary.
+function subscribe(name, subscription) {
+  return channel(name).subscribe(subscription);
+}
+
+// Convenience wrapper: remove `subscription` from the named channel.
+// Returns false when the subscription was not found.
+function unsubscribe(name, subscription) {
+  return channel(name).unsubscribe(subscription);
+}
+
+// True when the named channel exists and currently has subscribers. Unlike
+// subscribe()/unsubscribe(), this never creates the channel.
+function hasSubscribers(name) {
+  const channel = channels.get(name);
+  if (!channel) return false;
+
+  return channel.hasSubscribers;
+}
+
+// The five per-event channels that make up one TracingChannel.
+const traceEvents = ["start", "end", "asyncStart", "asyncEnd", "error"];
+
+// Throw ERR_INVALID_ARG_TYPE unless `value` is a Channel (the custom
+// instanceof above matches both the active and inactive prototypes).
+function assertChannel(value, name) {
+  if (!(value instanceof Channel)) {
+    throw new ERR_INVALID_ARG_TYPE(name, ["Channel"], value);
+  }
+}
+
+// Resolve one per-event channel for a TracingChannel: either derive it from
+// a base name ("tracing:<base>:<event>"), or pick a pre-built Channel off an
+// object keyed by event name.
+function tracingChannelFrom(nameOrChannels, name) {
+  if (typeof nameOrChannels === "string") {
+    return channel(`tracing:${nameOrChannels}:${name}`);
+  }
+
+  if (typeof nameOrChannels === "object" && nameOrChannels !== null) {
+    const channel = nameOrChannels[name];
+    assertChannel(channel, `nameOrChannels.${name}`);
+    return channel;
+  }
+
+  throw new ERR_INVALID_ARG_TYPE(
+    "nameOrChannels",
+    ["string", "object", "TracingChannel"],
+    nameOrChannels
+  );
+}
+
+// Groups the five tracing channels (start/end/asyncStart/asyncEnd/error)
+// behind one object and offers trace helpers for synchronous, promise-based
+// and callback-style functions.
+class TracingChannel {
+  constructor(nameOrChannels) {
+    for (let i = 0; i < traceEvents.length; ++i) {
+      const eventName = traceEvents[i];
+      // Defined as non-enumerable, non-writable data properties (descriptor
+      // defaults); __proto__: null keeps the descriptor pollution-free.
+      ObjectDefineProperty(this, eventName, {
+        __proto__: null,
+        value: tracingChannelFrom(nameOrChannels, eventName),
+      });
+    }
+  }
+
+  // True if any of the five per-event channels has a subscriber.
+  get hasSubscribers() {
+    return (
+      this.start?.hasSubscribers ||
+      this.end?.hasSubscribers ||
+      this.asyncStart?.hasSubscribers ||
+      this.asyncEnd?.hasSubscribers ||
+      this.error?.hasSubscribers
+    );
+  }
+
+  // Subscribe the matching handler (if provided) to each per-event channel.
+  subscribe(handlers) {
+    for (let i = 0; i < traceEvents.length; ++i) {
+      const name = traceEvents[i];
+      if (!handlers[name]) continue;
+
+      this[name]?.subscribe(handlers[name]);
+    }
+  }
+
+  // Unsubscribe the provided handlers; returns false if any handler was not
+  // actually subscribed (but still attempts all of them).
+  unsubscribe(handlers) {
+    let done = true;
+
+    for (let i = 0; i < traceEvents.length; ++i) {
+      const name = traceEvents[i];
+      if (!handlers[name]) continue;
+
+      if (!this[name]?.unsubscribe(handlers[name])) {
+        done = false;
+      }
+    }
+
+    return done;
+  }
+
+  // Trace a synchronous call: start (with stores entered), then the call;
+  // on throw publish error, and always publish end. `context` is mutated
+  // with .result or .error.
+  traceSync(fn, context = {}, thisArg, ...args) {
+    if (!this.hasSubscribers) {
+      return ReflectApply(fn, thisArg, args);
+    }
+
+    const { start, end, error } = this;
+
+    return start.runStores(context, () => {
+      try {
+        const result = ReflectApply(fn, thisArg, args);
+        context.result = result;
+        return result;
+      } catch (err) {
+        context.error = err;
+        error.publish(context);
+        throw err;
+      } finally {
+        end.publish(context);
+      }
+    });
+  }
+
+  // Trace a promise-returning call. `end` fires when the synchronous part
+  // returns; asyncStart/asyncEnd fire when the promise settles.
+  tracePromise(fn, context = {}, thisArg, ...args) {
+    if (!this.hasSubscribers) {
+      return ReflectApply(fn, thisArg, args);
+    }
+
+    const { start, end, asyncStart, asyncEnd, error } = this;
+
+    function reject(err) {
+      context.error = err;
+      error.publish(context);
+      asyncStart.publish(context);
+      // TODO: Is there a way to have asyncEnd _after_ the continuation?
+      asyncEnd.publish(context);
+      return PromiseReject(err);
+    }
+
+    function resolve(result) {
+      context.result = result;
+      asyncStart.publish(context);
+      // TODO: Is there a way to have asyncEnd _after_ the continuation?
+      asyncEnd.publish(context);
+      return result;
+    }
+
+    return start.runStores(context, () => {
+      try {
+        let promise = ReflectApply(fn, thisArg, args);
+        // Convert thenables to native promises
+        if (!(promise instanceof Promise)) {
+          promise = PromiseResolve(promise);
+        }
+        return PromisePrototypeThen(promise, resolve, reject);
+      } catch (err) {
+        context.error = err;
+        error.publish(context);
+        throw err;
+      } finally {
+        end.publish(context);
+      }
+    });
+  }
+
+  // Trace a callback-taking call. `position` selects the callback argument
+  // (default -1 = last); that argument is replaced with a wrapper that
+  // publishes asyncStart/asyncEnd around the user callback.
+  traceCallback(fn, position = -1, context = {}, thisArg, ...args) {
+    if (!this.hasSubscribers) {
+      return ReflectApply(fn, thisArg, args);
+    }
+
+    const { start, end, asyncStart, asyncEnd, error } = this;
+
+    function wrappedCallback(err, res) {
+      if (err) {
+        context.error = err;
+        error.publish(context);
+      } else {
+        context.result = res;
+      }
+
+      // Using runStores here enables manual context failure recovery
+      asyncStart.runStores(context, () => {
+        try {
+          return ReflectApply(callback, this, arguments);
+        } finally {
+          asyncEnd.publish(context);
+        }
+      });
+    }
+
+    const callback = ArrayPrototypeAt(args, position);
+    validateFunction(callback, "callback");
+    ArrayPrototypeSplice(args, position, 1, wrappedCallback);
+
+    return start.runStores(context, () => {
+      try {
+        return ReflectApply(fn, thisArg, args);
+      } catch (err) {
+        context.error = err;
+        error.publish(context);
+        throw err;
+      } finally {
+        end.publish(context);
+      }
+    });
+  }
+}
+
+// Factory mirroring the function-style API of the other exports.
+function tracingChannel(nameOrChannels) {
+  return new TracingChannel(nameOrChannels);
+}
+
+module.exports = {
+  channel,
+  hasSubscribers,
+  subscribe,
+  tracingChannel,
+  unsubscribe,
+  Channel,
+};
diff --git a/.codesandbox/node/dns.js b/.codesandbox/node/dns.js
new file mode 100644
index 0000000..22d23de
--- /dev/null
+++ b/.codesandbox/node/dns.js
@@ -0,0 +1,345 @@
+"use strict";
+
+const { ObjectDefineProperties, ObjectDefineProperty, Symbol } = primordials;
+
+const cares = internalBinding("cares_wrap");
+const { isIP } = require("internal/net");
+const { customPromisifyArgs } = require("internal/util");
+const {
+ DNSException,
+ codes: { ERR_INVALID_ARG_TYPE, ERR_INVALID_ARG_VALUE, ERR_MISSING_ARGS },
+} = require("internal/errors");
+const {
+ bindDefaultResolver,
+ setDefaultResolver,
+ validateHints,
+ getDefaultResultOrder,
+ setDefaultResultOrder,
+ errorCodes: dnsErrorCodes,
+ validDnsOrders,
+ validFamilies,
+} = require("internal/dns/utils");
+const { Resolver } = require("internal/dns/callback_resolver");
+const {
+ NODATA,
+ FORMERR,
+ SERVFAIL,
+ NOTFOUND,
+ NOTIMP,
+ REFUSED,
+ BADQUERY,
+ BADNAME,
+ BADFAMILY,
+ BADRESP,
+ CONNREFUSED,
+ TIMEOUT,
+ EOF,
+ FILE,
+ NOMEM,
+ DESTRUCTION,
+ BADSTR,
+ BADFLAGS,
+ NONAME,
+ BADHINTS,
+ NOTINITIALIZED,
+ LOADIPHLPAPI,
+ ADDRGETNETWORKPARAMS,
+ CANCELLED,
+} = dnsErrorCodes;
+const {
+ validateBoolean,
+ validateFunction,
+ validateNumber,
+ validateOneOf,
+ validatePort,
+ validateString,
+} = require("internal/validators");
+
+const {
+ GetAddrInfoReqWrap,
+ GetNameInfoReqWrap,
+ DNS_ORDER_VERBATIM,
+ DNS_ORDER_IPV4_FIRST,
+ DNS_ORDER_IPV6_FIRST,
+} = cares;
+
+const kPerfHooksDnsLookupContext = Symbol("kPerfHooksDnsLookupContext");
+const kPerfHooksDnsLookupServiceContext = Symbol(
+ "kPerfHooksDnsLookupServiceContext"
+);
+
+const { hasObserver, startPerf, stopPerf } = require("internal/perf/observe");
+
+let promises = null; // Lazy loaded
+
+// Completion callback for single-result getaddrinfo requests (`this` is the
+// GetAddrInfoReqWrap). Reports only the first address; family falls back to
+// isIP() when the request did not pin one.
+function onlookup(err, addresses) {
+  if (err) {
+    return this.callback(new DNSException(err, "getaddrinfo", this.hostname));
+  }
+  this.callback(null, addresses[0], this.family || isIP(addresses[0]));
+  // Close out the perf_hooks "dns" entry started in lookup(), if any.
+  if (this[kPerfHooksDnsLookupContext] && hasObserver("dns")) {
+    stopPerf(this, kPerfHooksDnsLookupContext, { detail: { addresses } });
+  }
+}
+
+// Completion callback for `all: true` getaddrinfo requests. Rewrites the
+// raw address strings in place into { address, family } records before
+// invoking the user callback.
+function onlookupall(err, addresses) {
+  if (err) {
+    return this.callback(new DNSException(err, "getaddrinfo", this.hostname));
+  }
+
+  const family = this.family;
+  for (let i = 0; i < addresses.length; i++) {
+    const addr = addresses[i];
+    addresses[i] = {
+      address: addr,
+      family: family || isIP(addr),
+    };
+  }
+
+  this.callback(null, addresses);
+  // Close out the perf_hooks "dns" entry started in lookup(), if any.
+  if (this[kPerfHooksDnsLookupContext] && hasObserver("dns")) {
+    stopPerf(this, kPerfHooksDnsLookupContext, { detail: { addresses } });
+  }
+}
+
+// Easy DNS A/AAAA look up
+// lookup(hostname, [options,] callback)
+//
+// `options` is either a numeric family (validated against validFamilies) or
+// an object { family, hints, all, verbatim, order }. Invokes
+// callback(err, address, family) — or callback(err, addresses) when
+// options.all is true. Returns the req wrap, or {} when the result is
+// delivered via process.nextTick (IP literals and immediate errors).
+function lookup(hostname, options, callback) {
+  let hints = 0;
+  let family = 0;
+  let all = false;
+  let dnsOrder = getDefaultResultOrder();
+
+  // Parse arguments
+  if (hostname) {
+    validateString(hostname, "hostname");
+  }
+
+  if (typeof options === "function") {
+    // lookup(hostname, callback) form.
+    callback = options;
+    family = 0;
+  } else if (typeof options === "number") {
+    validateFunction(callback, "callback");
+
+    validateOneOf(options, "family", validFamilies);
+    family = options;
+  } else if (options !== undefined && typeof options !== "object") {
+    // Validate whichever argument is the callback first, so a missing
+    // callback is reported before the invalid options argument.
+    validateFunction(arguments.length === 2 ? options : callback, "callback");
+    throw new ERR_INVALID_ARG_TYPE("options", ["integer", "object"], options);
+  } else {
+    validateFunction(callback, "callback");
+
+    if (options?.hints != null) {
+      validateNumber(options.hints, "options.hints");
+      hints = options.hints >>> 0;
+      validateHints(hints);
+    }
+    if (options?.family != null) {
+      // Accept the string aliases "IPv4"/"IPv6" as well as numeric families.
+      switch (options.family) {
+        case "IPv4":
+          family = 4;
+          break;
+        case "IPv6":
+          family = 6;
+          break;
+        default:
+          validateOneOf(options.family, "options.family", validFamilies);
+          family = options.family;
+          break;
+      }
+    }
+    if (options?.all != null) {
+      validateBoolean(options.all, "options.all");
+      all = options.all;
+    }
+    // Legacy `verbatim` flag; `order` is checked afterwards and therefore
+    // wins when both are supplied.
+    if (options?.verbatim != null) {
+      validateBoolean(options.verbatim, "options.verbatim");
+      dnsOrder = options.verbatim ? "verbatim" : "ipv4first";
+    }
+    if (options?.order != null) {
+      validateOneOf(options.order, "options.order", validDnsOrders);
+      dnsOrder = options.order;
+    }
+  }
+
+  if (!hostname) {
+    throw new ERR_INVALID_ARG_VALUE(
+      "hostname",
+      hostname,
+      "must be a non-empty string"
+    );
+  }
+
+  // IP literals skip the resolver entirely and complete on the next tick.
+  const matchedFamily = isIP(hostname);
+  if (matchedFamily) {
+    if (all) {
+      process.nextTick(callback, null, [
+        { address: hostname, family: matchedFamily },
+      ]);
+    } else {
+      process.nextTick(callback, null, hostname, matchedFamily);
+    }
+    return {};
+  }
+
+  const req = new GetAddrInfoReqWrap();
+  req.callback = callback;
+  req.family = family;
+  req.hostname = hostname;
+  req.oncomplete = all ? onlookupall : onlookup;
+
+  // Map the string order to the cares constant.
+  let order = DNS_ORDER_VERBATIM;
+
+  if (dnsOrder === "ipv4first") {
+    order = DNS_ORDER_IPV4_FIRST;
+  } else if (dnsOrder === "ipv6first") {
+    order = DNS_ORDER_IPV6_FIRST;
+  }
+
+  const err = cares.getaddrinfo(req, hostname, family, hints, order);
+  if (err) {
+    // Immediate failure: still deliver asynchronously for consistency.
+    process.nextTick(callback, new DNSException(err, "getaddrinfo", hostname));
+    return {};
+  }
+  if (hasObserver("dns")) {
+    const detail = {
+      hostname,
+      family,
+      hints,
+      verbatim: order === DNS_ORDER_VERBATIM,
+      order: dnsOrder,
+    };
+
+    startPerf(req, kPerfHooksDnsLookupContext, {
+      type: "dns",
+      name: "lookup",
+      detail,
+    });
+  }
+  return req;
+}
+
+// When promisified via util.promisify, resolve with a named-properties
+// object { address, family } instead of a single value.
+ObjectDefineProperty(lookup, customPromisifyArgs, {
+  __proto__: null,
+  value: ["address", "family"],
+  enumerable: false,
+});
+
+// Completion callback for getnameinfo requests (`this` is the
+// GetNameInfoReqWrap).
+function onlookupservice(err, hostname, service) {
+  if (err)
+    return this.callback(new DNSException(err, "getnameinfo", this.hostname));
+
+  this.callback(null, hostname, service);
+  // Close out the perf_hooks "dns" entry started in lookupService(), if any.
+  if (this[kPerfHooksDnsLookupServiceContext] && hasObserver("dns")) {
+    stopPerf(this, kPerfHooksDnsLookupServiceContext, {
+      detail: { hostname, service },
+    });
+  }
+}
+
+// Reverse lookup: resolve an IP address + port to hostname and service
+// name. All three arguments are mandatory and `address` must be a valid
+// IPv4/IPv6 literal. Throws synchronously on invalid input or immediate
+// cares errors; otherwise returns the req wrap.
+function lookupService(address, port, callback) {
+  if (arguments.length !== 3)
+    throw new ERR_MISSING_ARGS("address", "port", "callback");
+
+  if (isIP(address) === 0) throw new ERR_INVALID_ARG_VALUE("address", address);
+
+  validatePort(port);
+
+  validateFunction(callback, "callback");
+
+  // Coerce (e.g. numeric strings) to a number after validation.
+  port = +port;
+
+  const req = new GetNameInfoReqWrap();
+  req.callback = callback;
+  req.hostname = address;
+  req.port = port;
+  req.oncomplete = onlookupservice;
+
+  const err = cares.getnameinfo(req, address, port);
+  if (err) throw new DNSException(err, "getnameinfo", address);
+  if (hasObserver("dns")) {
+    startPerf(req, kPerfHooksDnsLookupServiceContext, {
+      type: "dns",
+      name: "lookupService",
+      detail: {
+        host: address,
+        port,
+      },
+    });
+  }
+  return req;
+}
+
+// When promisified via util.promisify, resolve with a named-properties
+// object { hostname, service } instead of a single value.
+ObjectDefineProperty(lookupService, customPromisifyArgs, {
+  __proto__: null,
+  value: ["hostname", "service"],
+  enumerable: false,
+});
+
+// dns.setServers(): build a fresh default Resolver configured with
+// `servers`, install it, and rebind the module-level resolver methods —
+// including the promises API when it has already been loaded lazily.
+function defaultResolverSetServers(servers) {
+  const resolver = new Resolver();
+
+  resolver.setServers(servers);
+  setDefaultResolver(resolver);
+  bindDefaultResolver(module.exports, Resolver.prototype);
+
+  if (promises !== null)
+    bindDefaultResolver(promises, promises.Resolver.prototype);
+}
+
+module.exports = {
+  lookup,
+  lookupService,
+
+  Resolver,
+  getDefaultResultOrder,
+  setDefaultResultOrder,
+  setServers: defaultResolverSetServers,
+
+  // uv_getaddrinfo flags
+  ADDRCONFIG: cares.AI_ADDRCONFIG,
+  ALL: cares.AI_ALL,
+  V4MAPPED: cares.AI_V4MAPPED,
+
+  // ERROR CODES
+  NODATA,
+  FORMERR,
+  SERVFAIL,
+  NOTFOUND,
+  NOTIMP,
+  REFUSED,
+  BADQUERY,
+  BADNAME,
+  BADFAMILY,
+  BADRESP,
+  CONNREFUSED,
+  TIMEOUT,
+  EOF,
+  FILE,
+  NOMEM,
+  DESTRUCTION,
+  BADSTR,
+  BADFLAGS,
+  NONAME,
+  BADHINTS,
+  NOTINITIALIZED,
+  LOADIPHLPAPI,
+  ADDRGETNETWORKPARAMS,
+  CANCELLED,
+};
+
+// Attach the default resolver's resolve*/reverse methods to the exports.
+bindDefaultResolver(module.exports, Resolver.prototype);
+
+// dns.promises is loaded lazily on first access to avoid paying its cost
+// for callers that only use the callback API.
+ObjectDefineProperties(module.exports, {
+  promises: {
+    __proto__: null,
+    configurable: true,
+    enumerable: true,
+    get() {
+      if (promises === null) {
+        promises = require("internal/dns/promises");
+      }
+      return promises;
+    },
+  },
+});
diff --git a/.codesandbox/node/domain.js b/.codesandbox/node/domain.js
new file mode 100644
index 0000000..29aefdb
--- /dev/null
+++ b/.codesandbox/node/domain.js
@@ -0,0 +1,529 @@
+"use strict";
+
+// WARNING: THIS MODULE IS PENDING DEPRECATION.
+//
+// No new pull requests targeting this module will be accepted
+// unless they address existing, critical bugs.
+
+const {
+ ArrayPrototypeEvery,
+ ArrayPrototypeIndexOf,
+ ArrayPrototypeLastIndexOf,
+ ArrayPrototypePush,
+ ArrayPrototypeSlice,
+ ArrayPrototypeSplice,
+ Error,
+ FunctionPrototypeCall,
+ ObjectDefineProperty,
+ Promise,
+ ReflectApply,
+ SafeMap,
+ SafeWeakMap,
+ StringPrototypeRepeat,
+ Symbol,
+} = primordials;
+
+const EventEmitter = require("events");
+const {
+ ERR_DOMAIN_CALLBACK_NOT_AVAILABLE,
+ ERR_DOMAIN_CANNOT_SET_UNCAUGHT_EXCEPTION_CAPTURE,
+ ERR_UNHANDLED_ERROR,
+} = require("internal/errors").codes;
+const { createHook } = require("async_hooks");
+const { useDomainTrampoline } = require("internal/async_hooks");
+
+const kWeak = Symbol("kWeak");
+const { WeakReference } = require("internal/util");
+
+// Overwrite process.domain with a getter/setter that will allow for more
+// effective optimizations
+// (backed by a single-element array so both accessors close over one
+// stable object rather than a reassignable binding).
+const _domain = [null];
+ObjectDefineProperty(process, "domain", {
+  __proto__: null,
+  enumerable: true,
+  get: function () {
+    return _domain[0];
+  },
+  set: function (arg) {
+    return (_domain[0] = arg);
+  },
+});
+
+const vmPromises = new SafeWeakMap();
+const pairing = new SafeMap();
+// Async hook that propagates the active domain across async resources:
+// init tags new resources with the current domain, before/after enter and
+// exit it around callbacks, and destroy drops the bookkeeping.
+const asyncHook = createHook({
+  init(asyncId, type, triggerAsyncId, resource) {
+    if (process.domain !== null && process.domain !== undefined) {
+      // If this operation is created while in a domain, let's mark it
+      pairing.set(asyncId, process.domain[kWeak]);
+      // Promises from other contexts, such as with the VM module, should not
+      // have a domain property as it can be used to escape the sandbox.
+      if (type !== "PROMISE" || resource instanceof Promise) {
+        ObjectDefineProperty(resource, "domain", {
+          __proto__: null,
+          configurable: true,
+          enumerable: false,
+          value: process.domain,
+          writable: true,
+        });
+        // Because promises from other contexts don't get a domain field,
+        // the domain needs to be held alive another way. Stuffing it in a
+        // weakmap connected to the promise lifetime can fix that.
+      } else {
+        vmPromises.set(resource, process.domain);
+      }
+    }
+  },
+  before(asyncId) {
+    const current = pairing.get(asyncId);
+    if (current !== undefined) {
+      // Enter domain for this cb
+      // We will get the domain through current.get(), because the resource
+      // object's .domain property makes sure it is not garbage collected.
+      // However, we do need to make the reference to the domain non-weak,
+      // so that it cannot be garbage collected before the after() hook.
+      current.incRef();
+      current.get().enter();
+    }
+  },
+  after(asyncId) {
+    const current = pairing.get(asyncId);
+    if (current !== undefined) {
+      // Exit domain for this cb
+      const domain = current.get();
+      current.decRef();
+      domain.exit();
+    }
+  },
+  destroy(asyncId) {
+    pairing.delete(asyncId); // cleaning up
+  },
+});
+
+// When domains are in use, they claim full ownership of the
+// uncaught exception capture callback.
+if (process.hasUncaughtExceptionCaptureCallback()) {
+  throw new ERR_DOMAIN_CALLBACK_NOT_AVAILABLE();
+}
+
+// Get the stack trace at the point where `domain` was required.
+// eslint-disable-next-line no-restricted-syntax
+const domainRequireStack = new Error("require(`domain`) at this point").stack;
+
+// Keep a private reference to the real setter; the public API is replaced
+// below so user code cannot take the capture callback away from domains.
+// The thrown error's stack embeds where `domain` was required, to aid
+// debugging.
+const { setUncaughtExceptionCaptureCallback } = process;
+process.setUncaughtExceptionCaptureCallback = function (fn) {
+  const err = new ERR_DOMAIN_CANNOT_SET_UNCAUGHT_EXCEPTION_CAPTURE();
+  err.stack += `\n${StringPrototypeRepeat("-", 40)}\n${domainRequireStack}`;
+  throw err;
+};
+
+let sendMakeCallbackDeprecation = false;
+// Emit the DEP0097 deprecation warning at most once per process.
+function emitMakeCallbackDeprecation({ target, method }) {
+  if (!sendMakeCallbackDeprecation) {
+    process.emitWarning(
+      "Using a domain property in MakeCallback is deprecated. Use the " +
+        "async_context variant of MakeCallback or the AsyncResource class " +
+        "instead. " +
+        `(Triggered by calling ${method?.name || ""} ` +
+        `on ${target?.constructor?.name}.)`,
+      "DeprecationWarning",
+      "DEP0097"
+    );
+    sendMakeCallbackDeprecation = true;
+  }
+}
+
+// Trampoline installed via useDomainTrampoline(): wraps internal callback
+// invocations so the target object's domain (if any) is entered around
+// `cb`, warning once (DEP0097) when that legacy path is actually used.
+function topLevelDomainCallback(cb, ...args) {
+  const domain = this.domain;
+  if (exports.active && domain)
+    emitMakeCallbackDeprecation({ target: this, method: cb });
+
+  if (domain) domain.enter();
+  const ret = ReflectApply(cb, this, args);
+  if (domain) domain.exit();
+
+  return ret;
+}
+
+// It's possible to enter one domain while already inside
+// another one. The stack is each entered domain.
+// _stack is exported for introspection/testing; the binding is rebound in
+// emit() below, so always read it through exports.
+let stack = [];
+exports._stack = stack;
+useDomainTrampoline(topLevelDomainCallback);
+
+// Install or remove the process uncaught-exception capture callback based
+// on whether any domain currently on the stack has an 'error' listener.
+function updateExceptionCapture() {
+  if (
+    ArrayPrototypeEvery(stack, (domain) => domain.listenerCount("error") === 0)
+  ) {
+    setUncaughtExceptionCaptureCallback(null);
+  } else {
+    // Clear first before installing — presumably an existing capture
+    // callback must be removed before a new one may be set (TODO confirm
+    // against the process API contract).
+    setUncaughtExceptionCaptureCallback(null);
+    setUncaughtExceptionCaptureCallback((er) => {
+      return process.domain._errorHandler(er);
+    });
+  }
+}
+
+// Keep domainUncaughtExceptionClear as the FIRST 'uncaughtException'
+// listener whenever any other listener is (re)added.
+process.on("newListener", (name, listener) => {
+  if (
+    name === "uncaughtException" &&
+    listener !== domainUncaughtExceptionClear
+  ) {
+    // Make sure the first listener for `uncaughtException` always clears
+    // the domain stack.
+    process.removeListener(name, domainUncaughtExceptionClear);
+    process.prependListener(name, domainUncaughtExceptionClear);
+  }
+});
+
+process.on("removeListener", (name, listener) => {
+  if (
+    name === "uncaughtException" &&
+    listener !== domainUncaughtExceptionClear
+  ) {
+    // If the domain listener would be the only remaining one, remove it.
+    const listeners = process.listeners("uncaughtException");
+    if (listeners.length === 1 && listeners[0] === domainUncaughtExceptionClear)
+      process.removeListener(name, domainUncaughtExceptionClear);
+  }
+});
+
+// An uncaught exception ends the current tick; no domain may remain
+// entered afterwards, so reset the stack and active domain entirely.
+function domainUncaughtExceptionClear() {
+  stack.length = 0;
+  exports.active = process.domain = null;
+  updateExceptionCapture();
+}
+
+class Domain extends EventEmitter {
+  constructor() {
+    super();
+
+    this.members = [];
+    // Weak self-reference handed to the async hook; pending async resources
+    // keep the domain alive only while callbacks are in flight.
+    this[kWeak] = new WeakReference(this);
+    // Domain propagation is lazy: the hook is enabled on first Domain use.
+    asyncHook.enable();
+
+    // Listener changes may require (un)installing the process
+    // uncaught-exception capture callback.
+    this.on("removeListener", updateExceptionCapture);
+    this.on("newListener", updateExceptionCapture);
+  }
+}
+
+exports.Domain = Domain;
+
+exports.create = exports.createDomain = function createDomain() {
+  return new Domain();
+};
+
+// The active domain is always the one that we're currently in.
+exports.active = null;
+// Prototype-level placeholder; each instance shadows it in the constructor.
+Domain.prototype.members = undefined;
+
+// Called by process._fatalException in case an error was thrown.
+// Returns true when a domain 'error' listener handled the error, false
+// otherwise (in which case the process crashes as usual).
+Domain.prototype._errorHandler = function (er) {
+  let caught = false;
+
+  // Tag object/function errors with the owning domain (primitives cannot
+  // carry properties).
+  if ((typeof er === "object" && er !== null) || typeof er === "function") {
+    ObjectDefineProperty(er, "domain", {
+      __proto__: null,
+      configurable: true,
+      enumerable: false,
+      value: this,
+      writable: true,
+    });
+    er.domainThrown = true;
+  }
+  // Pop all adjacent duplicates of the currently active domain from the stack.
+  // This is done to prevent a domain's error handler to run within the context
+  // of itself, and re-entering itself recursively handler as a result of an
+  // exception thrown in its context.
+  while (exports.active === this) {
+    this.exit();
+  }
+
+  // The top-level domain-handler is handled separately.
+  //
+  // The reason is that if V8 was passed a command line option
+  // asking it to abort on an uncaught exception (currently
+  // "--abort-on-uncaught-exception"), we want an uncaught exception
+  // in the top-level domain error handler to make the
+  // process abort. Using try/catch here would always make V8 think
+  // that these exceptions are caught, and thus would prevent it from
+  // aborting in these cases.
+  if (stack.length === 0) {
+    // If there's no error handler, do not emit an 'error' event
+    // as this would throw an error, make the process exit, and thus
+    // prevent the process 'uncaughtException' event from being emitted
+    // if a listener is set.
+    if (this.listenerCount("error") > 0) {
+      // Clear the uncaughtExceptionCaptureCallback so that we know that, since
+      // the top-level domain is not active anymore, it would be ok to abort on
+      // an uncaught exception at this point
+      setUncaughtExceptionCaptureCallback(null);
+      try {
+        caught = this.emit("error", er);
+      } finally {
+        updateExceptionCapture();
+      }
+    }
+  } else {
+    // Wrap this in a try/catch so we don't get infinite throwing
+    try {
+      // One of three things will happen here.
+      //
+      // 1. There is a handler, caught = true
+      // 2. There is no handler, caught = false
+      // 3. It throws, caught = false
+      //
+      // If caught is false after this, then there's no need to exit()
+      // the domain, because we're going to crash the process anyway.
+      caught = this.emit("error", er);
+    } catch (er2) {
+      // The domain error handler threw! oh no!
+      // See if another domain can catch THIS error,
+      // or else crash on the original one.
+      updateExceptionCapture();
+      if (stack.length) {
+        exports.active = process.domain = stack[stack.length - 1];
+        caught = process.domain._errorHandler(er2);
+      } else {
+        // Pass on to the next exception handler.
+        throw er2;
+      }
+    }
+  }
+
+  // Exit all domains on the stack. Uncaught exceptions end the
+  // current tick and no domains should be left on the stack
+  // between ticks.
+  domainUncaughtExceptionClear();
+
+  return caught;
+};
+
+// Make this domain the active one and push it onto the entered-domain
+// stack. enter()/exit() calls must pair up.
+Domain.prototype.enter = function () {
+  // Note that this might be a no-op, but we still need
+  // to push it onto the stack so that we can pop it later.
+  exports.active = process.domain = this;
+  ArrayPrototypePush(stack, this);
+  updateExceptionCapture();
+};
+
+// Pop this domain (and anything entered after it) off the stack, restoring
+// the previous domain as active.
+Domain.prototype.exit = function () {
+  // Don't do anything if this domain is not on the stack.
+  const index = ArrayPrototypeLastIndexOf(stack, this);
+  if (index === -1) return;
+
+  // Exit all domains until this one.
+  ArrayPrototypeSplice(stack, index);
+
+  exports.active = stack.length === 0 ? undefined : stack[stack.length - 1];
+  process.domain = exports.active;
+  updateExceptionCapture();
+};
+
+// note: this works for timers as well.
+// Claim `ee` (an EventEmitter or timer) for this domain: its 'error' events
+// will be routed here via the emit() override below.
+Domain.prototype.add = function (ee) {
+  // If the domain is already added, then nothing left to do.
+  if (ee.domain === this) return;
+
+  // Has a domain already - remove it first.
+  if (ee.domain) ee.domain.remove(ee);
+
+  // Check for circular Domain->Domain links.
+  // They cause big issues.
+  //
+  // For example:
+  // var d = domain.create();
+  // var e = domain.create();
+  // d.add(e);
+  // e.add(d);
+  // e.emit('error', er); // RangeError, stack overflow!
+  if (this.domain && ee instanceof Domain) {
+    for (let d = this.domain; d; d = d.domain) {
+      if (ee === d) return;
+    }
+  }
+
+  ObjectDefineProperty(ee, "domain", {
+    __proto__: null,
+    configurable: true,
+    enumerable: false,
+    value: this,
+    writable: true,
+  });
+  ArrayPrototypePush(this.members, ee);
+};
+
+// Detach `ee` from this domain and drop it from the members list.
+Domain.prototype.remove = function (ee) {
+  ee.domain = null;
+  const index = ArrayPrototypeIndexOf(this.members, ee);
+  if (index !== -1) ArrayPrototypeSplice(this.members, index, 1);
+};
+
+// Run fn with this domain active; any extra arguments are forwarded to fn.
+Domain.prototype.run = function (fn) {
+  this.enter();
+  const ret = ReflectApply(fn, this, ArrayPrototypeSlice(arguments, 1));
+  this.exit();
+
+  return ret;
+};
+
+// Shared implementation for Domain.prototype.intercept(): a leading Error
+// argument is routed to the domain's 'error' event instead of the user
+// callback; otherwise the error slot is stripped and cb runs inside the
+// domain.
+function intercepted(_this, self, cb, fnargs) {
+  if (fnargs[0] && fnargs[0] instanceof Error) {
+    const er = fnargs[0];
+    er.domainBound = cb;
+    er.domainThrown = false;
+    ObjectDefineProperty(er, "domain", {
+      __proto__: null,
+      configurable: true,
+      enumerable: false,
+      value: self,
+      writable: true,
+    });
+    self.emit("error", er);
+    return;
+  }
+
+  self.enter();
+  const ret = ReflectApply(cb, _this, ArrayPrototypeSlice(fnargs, 1));
+  self.exit();
+
+  return ret;
+}
+
+// Returns a wrapper around the Node-style callback `cb` that intercepts a
+// leading error argument (see intercepted() above).
+Domain.prototype.intercept = function (cb) {
+  const self = this;
+
+  function runIntercepted() {
+    return intercepted(this, self, cb, arguments);
+  }
+
+  return runIntercepted;
+};
+
+// Shared implementation for Domain.prototype.bind(): run cb with the domain
+// entered, forwarding all arguments unchanged.
+function bound(_this, self, cb, fnargs) {
+  self.enter();
+  const ret = ReflectApply(cb, _this, fnargs);
+  self.exit();
+
+  return ret;
+}
+
+// Returns a wrapper bound to this domain; the wrapper carries the domain in
+// a non-enumerable .domain property.
+Domain.prototype.bind = function (cb) {
+  const self = this;
+
+  function runBound() {
+    return bound(this, self, cb, arguments);
+  }
+
+  ObjectDefineProperty(runBound, "domain", {
+    __proto__: null,
+    configurable: true,
+    enumerable: false,
+    value: this,
+    writable: true,
+  });
+
+  return runBound;
+};
+
+// Override EventEmitter methods to make it domain-aware.
+EventEmitter.usingDomains = true;
+
+const eventInit = EventEmitter.init;
+// Stamp a non-enumerable .domain property on every new emitter, capturing
+// the active domain at construction time. Domain instances themselves are
+// excluded to avoid a domain owning itself.
+EventEmitter.init = function (opts) {
+  ObjectDefineProperty(this, "domain", {
+    __proto__: null,
+    configurable: true,
+    enumerable: false,
+    value: null,
+    writable: true,
+  });
+  if (exports.active && !(this instanceof exports.Domain)) {
+    this.domain = exports.active;
+  }
+
+  return FunctionPrototypeCall(eventInit, this, opts);
+};
+
+const eventEmit = EventEmitter.prototype.emit;
+// Domain-aware emit: unhandled 'error' events are routed to the emitter's
+// domain; every other event is emitted with the domain entered.
+EventEmitter.prototype.emit = function emit(...args) {
+  const domain = this.domain;
+
+  const type = args[0];
+  const shouldEmitError = type === "error" && this.listenerCount(type) > 0;
+
+  // Just call original `emit` if current EE instance has `error`
+  // handler, there's no active domain or this is process
+  if (
+    shouldEmitError ||
+    domain === null ||
+    domain === undefined ||
+    this === process
+  ) {
+    return ReflectApply(eventEmit, this, args);
+  }
+
+  if (type === "error") {
+    const er = args.length > 1 && args[1] ? args[1] : new ERR_UNHANDLED_ERROR();
+
+    if (typeof er === "object") {
+      er.domainEmitter = this;
+      ObjectDefineProperty(er, "domain", {
+        __proto__: null,
+        configurable: true,
+        enumerable: false,
+        value: domain,
+        writable: true,
+      });
+      er.domainThrown = false;
+    }
+
+    // Remove the current domain (and its duplicates) from the domains stack and
+    // set the active domain to its parent (if any) so that the domain's error
+    // handler doesn't run in its own context. This prevents any event emitter
+    // created or any exception thrown in that error handler from recursively
+    // executing that error handler.
+    const origDomainsStack = ArrayPrototypeSlice(stack);
+    const origActiveDomain = process.domain;
+
+    // Travel the domains stack from top to bottom to find the first domain
+    // instance that is not a duplicate of the current active domain.
+    let idx = stack.length - 1;
+    while (idx > -1 && process.domain === stack[idx]) {
+      --idx;
+    }
+
+    // Change the stack to not contain the current active domain, and only the
+    // domains above it on the stack.
+    if (idx < 0) {
+      stack.length = 0;
+    } else {
+      ArrayPrototypeSplice(stack, idx + 1);
+    }
+
+    // Change the current active domain
+    if (stack.length > 0) {
+      exports.active = process.domain = stack[stack.length - 1];
+    } else {
+      exports.active = process.domain = null;
+    }
+
+    updateExceptionCapture();
+
+    domain.emit("error", er);
+
+    // Now that the domain's error handler has completed, restore the domains
+    // stack and the active domain to their original values.
+    exports._stack = stack = origDomainsStack;
+    exports.active = process.domain = origActiveDomain;
+    updateExceptionCapture();
+
+    return false;
+  }
+
+  domain.enter();
+  const ret = ReflectApply(eventEmit, this, args);
+  domain.exit();
+
+  return ret;
+};
diff --git a/.codesandbox/node/events.js b/.codesandbox/node/events.js
new file mode 100644
index 0000000..fdb1605
--- /dev/null
+++ b/.codesandbox/node/events.js
@@ -0,0 +1,1244 @@
+"use strict";
+
+const {
+ ArrayPrototypeJoin,
+ ArrayPrototypePop,
+ ArrayPrototypePush,
+ ArrayPrototypeSlice,
+ ArrayPrototypeSplice,
+ ArrayPrototypeUnshift,
+ AsyncIteratorPrototype,
+ Boolean,
+ Error,
+ ErrorCaptureStackTrace,
+ FunctionPrototypeBind,
+ NumberMAX_SAFE_INTEGER,
+ ObjectDefineProperties,
+ ObjectDefineProperty,
+ ObjectGetPrototypeOf,
+ ObjectSetPrototypeOf,
+ Promise,
+ PromiseReject,
+ PromiseResolve,
+ ReflectApply,
+ ReflectOwnKeys,
+ String,
+ StringPrototypeSplit,
+ Symbol,
+ SymbolAsyncIterator,
+ SymbolDispose,
+ SymbolFor,
+} = primordials;
+const kRejection = SymbolFor("nodejs.rejection");
+
+const { kEmptyObject, spliceOne } = require("internal/util");
+
+const { inspect, identicalSequenceRange } = require("internal/util/inspect");
+
+let FixedQueue;
+let kFirstEventParam;
+let kResistStopPropagation;
+
+const {
+ AbortError,
+ codes: { ERR_INVALID_ARG_TYPE, ERR_UNHANDLED_ERROR },
+ genericNodeError,
+ kEnhanceStackBeforeInspector,
+} = require("internal/errors");
+
+const {
+ validateInteger,
+ validateAbortSignal,
+ validateBoolean,
+ validateFunction,
+ validateNumber,
+ validateObject,
+ validateString,
+} = require("internal/validators");
+const { addAbortListener } = require("internal/events/abort_listener");
+
+const kCapture = Symbol("kCapture");
+const kErrorMonitor = Symbol("events.errorMonitor");
+const kShapeMode = Symbol("shapeMode");
+const kMaxEventTargetListeners = Symbol("events.maxEventTargetListeners");
+const kMaxEventTargetListenersWarned = Symbol(
+ "events.maxEventTargetListenersWarned"
+);
+const kWatermarkData = SymbolFor("nodejs.watermarkData");
+
+let EventEmitterAsyncResource;
+// The EventEmitterAsyncResource has to be initialized lazily because event.js
+// is loaded so early in the bootstrap process, before async_hooks is available.
+//
+// This implementation was adapted straight from addaleax's
+// eventemitter-asyncresource MIT-licensed userland module.
+// https://github.com/addaleax/eventemitter-asyncresource
+function lazyEventEmitterAsyncResource() {
+  if (EventEmitterAsyncResource === undefined) {
+    const { AsyncResource } = require("async_hooks");
+
+    // Holds a strong back-reference to its emitter so the resource and the
+    // emitter share a lifetime (exposed via the `eventEmitter` getter).
+    class EventEmitterReferencingAsyncResource extends AsyncResource {
+      #eventEmitter;
+
+      /**
+       * @param {EventEmitter} ee
+       * @param {string} [type]
+       * @param {{
+       *   triggerAsyncId?: number,
+       *   requireManualDestroy?: boolean,
+       * }} [options]
+       */
+      constructor(ee, type, options) {
+        super(type, options);
+        this.#eventEmitter = ee;
+      }
+
+      /**
+       * @type {EventEmitter}
+       */
+      get eventEmitter() {
+        return this.#eventEmitter;
+      }
+    }
+
+    EventEmitterAsyncResource = class EventEmitterAsyncResource extends (
+      EventEmitter
+    ) {
+      #asyncResource;
+
+      /**
+       * @param {{
+       *   name?: string,
+       *   triggerAsyncId?: number,
+       *   requireManualDestroy?: boolean,
+       * }} [options]
+       */
+      constructor(options = undefined) {
+        let name;
+        if (typeof options === "string") {
+          name = options;
+          options = undefined;
+        } else {
+          // Direct construction requires an explicit name; subclasses may
+          // fall back to their own class name.
+          if (new.target === EventEmitterAsyncResource) {
+            validateString(options?.name, "options.name");
+          }
+          name = options?.name || new.target.name;
+        }
+        super(options);
+
+        this.#asyncResource = new EventEmitterReferencingAsyncResource(
+          this,
+          name,
+          options
+        );
+      }
+
+      /**
+       * @param {symbol|string} event
+       * @param {any[]} args
+       * @returns {boolean}
+       */
+      emit(event, ...args) {
+        const asyncResource = this.#asyncResource;
+        // Prepend (super.emit, this, event) so the whole emit call runs inside
+        // the resource's async scope: runInAsyncScope(fn, thisArg, ...args).
+        ArrayPrototypeUnshift(args, super.emit, this, event);
+        return ReflectApply(asyncResource.runInAsyncScope, asyncResource, args);
+      }
+
+      /**
+       * @returns {void}
+       */
+      emitDestroy() {
+        this.#asyncResource.emitDestroy();
+      }
+
+      /**
+       * @type {number}
+       */
+      get asyncId() {
+        return this.#asyncResource.asyncId();
+      }
+
+      /**
+       * @type {number}
+       */
+      get triggerAsyncId() {
+        return this.#asyncResource.triggerAsyncId();
+      }
+
+      /**
+       * @type {EventEmitterReferencingAsyncResource}
+       */
+      get asyncResource() {
+        return this.#asyncResource;
+      }
+    };
+  }
+  return EventEmitterAsyncResource;
+}
+
+/**
+ * Creates a new `EventEmitter` instance.
+ * @param {{ captureRejections?: boolean; }} [opts]
+ * @constructs EventEmitter
+ */
+function EventEmitter(opts) {
+ EventEmitter.init.call(this, opts);
+}
+module.exports = EventEmitter;
+module.exports.addAbortListener = addAbortListener;
+module.exports.once = once;
+module.exports.on = on;
+module.exports.getEventListeners = getEventListeners;
+module.exports.getMaxListeners = getMaxListeners;
+module.exports.listenerCount = listenerCount;
+// Backwards-compat with node 0.10.x
+EventEmitter.EventEmitter = EventEmitter;
+
+EventEmitter.usingDomains = false;
+
+EventEmitter.captureRejectionSymbol = kRejection;
+// Global toggle: reads/writes the prototype-level [kCapture] default that
+// newly constructed emitters copy in EventEmitter.init.
+ObjectDefineProperty(EventEmitter, "captureRejections", {
+  __proto__: null,
+  get() {
+    return EventEmitter.prototype[kCapture];
+  },
+  set(value) {
+    validateBoolean(value, "EventEmitter.captureRejections");
+
+    EventEmitter.prototype[kCapture] = value;
+  },
+  enumerable: true,
+});
+
+// Lazily materialize the AsyncResource-backed subclass on first access.
+ObjectDefineProperty(EventEmitter, "EventEmitterAsyncResource", {
+  __proto__: null,
+  enumerable: true,
+  get: lazyEventEmitterAsyncResource,
+  set: undefined,
+  configurable: true,
+});
+
+EventEmitter.errorMonitor = kErrorMonitor;
+
+// The default for captureRejections is false
+ObjectDefineProperty(EventEmitter.prototype, kCapture, {
+  __proto__: null,
+  value: false,
+  writable: true,
+  enumerable: false,
+});
+
+EventEmitter.prototype._events = undefined;
+EventEmitter.prototype._eventsCount = 0;
+EventEmitter.prototype._maxListeners = undefined;
+
+// By default EventEmitters will print a warning if more than 10 listeners are
+// added to it. This is a useful default which helps finding memory leaks.
+let defaultMaxListeners = 10;
+let isEventTarget;
+
+// Shared argument check: every registered listener must be callable.
+function checkListener(listener) {
+  validateFunction(listener, "listener");
+}
+
+ObjectDefineProperty(EventEmitter, "defaultMaxListeners", {
+  __proto__: null,
+  enumerable: true,
+  get: function () {
+    return defaultMaxListeners;
+  },
+  set: function (arg) {
+    validateNumber(arg, "defaultMaxListeners", 0);
+    defaultMaxListeners = arg;
+  },
+});
+
+// Expose the EventTarget bookkeeping symbols as frozen class constants.
+ObjectDefineProperties(EventEmitter, {
+  kMaxEventTargetListeners: {
+    __proto__: null,
+    value: kMaxEventTargetListeners,
+    enumerable: false,
+    configurable: false,
+    writable: false,
+  },
+  kMaxEventTargetListenersWarned: {
+    __proto__: null,
+    value: kMaxEventTargetListenersWarned,
+    enumerable: false,
+    configurable: false,
+    writable: false,
+  },
+});
+
+/**
+ * Sets the max listeners.
+ * With no targets, updates the module-wide default; otherwise applies `n` to
+ * each given EventTarget or EventEmitter individually.
+ * @param {number} n
+ * @param {EventTarget[] | EventEmitter[]} [eventTargets]
+ * @returns {void}
+ */
+EventEmitter.setMaxListeners = function (
+  n = defaultMaxListeners,
+  ...eventTargets
+) {
+  validateNumber(n, "setMaxListeners", 0);
+  if (eventTargets.length === 0) {
+    defaultMaxListeners = n;
+  } else {
+    if (isEventTarget === undefined)
+      isEventTarget = require("internal/event_target").isEventTarget;
+
+    for (let i = 0; i < eventTargets.length; i++) {
+      const target = eventTargets[i];
+      if (isEventTarget(target)) {
+        target[kMaxEventTargetListeners] = n;
+        // Reset the warning latch so a new limit can warn again.
+        target[kMaxEventTargetListenersWarned] = false;
+      } else if (typeof target.setMaxListeners === "function") {
+        target.setMaxListeners(n);
+      } else {
+        throw new ERR_INVALID_ARG_TYPE(
+          "eventTargets",
+          ["EventEmitter", "EventTarget"],
+          target
+        );
+      }
+    }
+  }
+};
+
+// If you're updating this function definition, please also update any
+// re-definitions, such as the one in the Domain module (lib/domain.js).
+EventEmitter.init = function (opts) {
+  if (
+    this._events === undefined ||
+    this._events === ObjectGetPrototypeOf(this)._events
+  ) {
+    this._events = { __proto__: null };
+    this._eventsCount = 0;
+    this[kShapeMode] = false;
+  } else {
+    // An own `_events` object already exists (e.g. pre-populated by a
+    // subclass); kShapeMode makes removals assign `undefined` rather than
+    // `delete` so the object's hidden class is preserved (see removeListener).
+    this[kShapeMode] = true;
+  }
+
+  this._maxListeners ||= undefined;
+
+  if (opts?.captureRejections) {
+    validateBoolean(opts.captureRejections, "options.captureRejections");
+    this[kCapture] = Boolean(opts.captureRejections);
+  } else {
+    // Assigning the kCapture property directly saves an expensive
+    // prototype lookup in a very sensitive hot path.
+    this[kCapture] = EventEmitter.prototype[kCapture];
+  }
+};
+
+// When captureRejections is enabled on `that`, hook a rejection handler onto
+// a thenable returned by a listener so an eventual rejection is surfaced via
+// emitUnhandledRejectionOrErr on a later tick.
+function addCatch(that, promise, type, args) {
+  if (!that[kCapture]) {
+    return;
+  }
+
+  // Handle Promises/A+ spec, then could be a getter
+  // that throws on second use.
+  try {
+    const then = promise.then;
+
+    if (typeof then === "function") {
+      then.call(promise, undefined, function (err) {
+        // The callback is called with nextTick to avoid a follow-up
+        // rejection from this promise.
+        process.nextTick(emitUnhandledRejectionOrErr, that, err, type, args);
+      });
+    }
+  } catch (err) {
+    that.emit("error", err);
+  }
+}
+
+// Route a captured rejection to the emitter's Symbol.for('nodejs.rejection')
+// handler when present, otherwise to its 'error' event.
+function emitUnhandledRejectionOrErr(ee, err, type, args) {
+  if (typeof ee[kRejection] === "function") {
+    ee[kRejection](err, type, ...args);
+  } else {
+    // We have to disable the capture rejections mechanism, otherwise
+    // we might end up in an infinite loop.
+    const prev = ee[kCapture];
+
+    // If the error handler throws, it is not catchable and it
+    // will end up in 'uncaughtException'. We restore the previous
+    // value of kCapture in case the uncaughtException is present
+    // and the exception is handled.
+    try {
+      ee[kCapture] = false;
+      ee.emit("error", err);
+    } finally {
+      ee[kCapture] = prev;
+    }
+  }
+}
+
+/**
+ * Increases the max listeners of the event emitter.
+ * @param {number} n
+ * @returns {EventEmitter}
+ */
+EventEmitter.prototype.setMaxListeners = function setMaxListeners(n) {
+  validateNumber(n, "setMaxListeners", 0);
+  this._maxListeners = n;
+  return this;
+};
+
+// Per-instance limit when set, otherwise the module-wide default.
+function _getMaxListeners(that) {
+  if (that._maxListeners === undefined) return EventEmitter.defaultMaxListeners;
+  return that._maxListeners;
+}
+
+/**
+ * Returns the current max listener value for the event emitter.
+ * @returns {number}
+ */
+EventEmitter.prototype.getMaxListeners = function getMaxListeners() {
+  return _getMaxListeners(this);
+};
+
+// Builds the combined stack shown for unhandled 'error' events: the error's
+// own stack followed by the emit() call site captured at throw time, with
+// frames common to both collapsed into a single placeholder line.
+// Bound with (this=emitter, err, own) in emit(); invoked lazily by the
+// inspector via kEnhanceStackBeforeInspector.
+function enhanceStackTrace(err, own) {
+  let ctorInfo = "";
+  try {
+    const { name } = this.constructor;
+    if (name !== "EventEmitter") ctorInfo = ` on ${name} instance`;
+  } catch {
+    // Continue regardless of error.
+  }
+  const sep = `\nEmitted 'error' event${ctorInfo} at:\n`;
+
+  // Drop the first line (message / "Error") from each stack, keeping frames.
+  const errStack = ArrayPrototypeSlice(
+    StringPrototypeSplit(err.stack, "\n"),
+    1
+  );
+  const ownStack = ArrayPrototypeSlice(
+    StringPrototypeSplit(own.stack, "\n"),
+    1
+  );
+
+  const { len, offset } = identicalSequenceRange(ownStack, errStack);
+  if (len > 0) {
+    ArrayPrototypeSplice(
+      ownStack,
+      offset + 1,
+      len - 2,
+      " [... lines matching original stack trace ...]"
+    );
+  }
+
+  return err.stack + sep + ArrayPrototypeJoin(ownStack, "\n");
+}
+
+/**
+ * Synchronously calls each of the listeners registered
+ * for the event.
+ * @param {string | symbol} type
+ * @param {...any} [args]
+ * @returns {boolean}
+ */
+EventEmitter.prototype.emit = function emit(type, ...args) {
+  let doError = type === "error";
+
+  const events = this._events;
+  if (events !== undefined) {
+    // kErrorMonitor observers are notified first; 'error' is only treated as
+    // unhandled when no regular 'error' listener exists.
+    if (doError && events[kErrorMonitor] !== undefined)
+      this.emit(kErrorMonitor, ...args);
+    doError &&= events.error === undefined;
+  } else if (!doError) return false;
+
+  // If there is no 'error' event listener then throw.
+  if (doError) {
+    let er;
+    if (args.length > 0) er = args[0];
+    if (er instanceof Error) {
+      try {
+        const capture = {};
+        ErrorCaptureStackTrace(capture, EventEmitter.prototype.emit);
+        ObjectDefineProperty(er, kEnhanceStackBeforeInspector, {
+          __proto__: null,
+          value: FunctionPrototypeBind(enhanceStackTrace, this, er, capture),
+          configurable: true,
+        });
+      } catch {
+        // Continue regardless of error.
+      }
+
+      // Note: The comments on the `throw` lines are intentional, they show
+      // up in Node's output if this results in an unhandled exception.
+      throw er; // Unhandled 'error' event
+    }
+
+    let stringifiedEr;
+    try {
+      stringifiedEr = inspect(er);
+    } catch {
+      stringifiedEr = er;
+    }
+
+    // At least give some kind of context to the user
+    const err = new ERR_UNHANDLED_ERROR(stringifiedEr);
+    err.context = er;
+    throw err; // Unhandled 'error' event
+  }
+
+  const handler = events[type];
+
+  if (handler === undefined) return false;
+
+  if (typeof handler === "function") {
+    const result = ReflectApply(handler, this, args);
+
+    // We check if result is undefined first because that
+    // is the most common case so we do not pay any perf
+    // penalty
+    if (result !== undefined && result !== null) {
+      addCatch(this, result, type, args);
+    }
+  } else {
+    // Snapshot the listener array so mutations during emit do not affect
+    // this dispatch; `len` is captured before any listener runs.
+    const len = handler.length;
+    const listeners = arrayClone(handler);
+    for (let i = 0; i < len; ++i) {
+      const result = ReflectApply(listeners[i], this, args);
+
+      // We check if result is undefined first because that
+      // is the most common case so we do not pay any perf
+      // penalty.
+      // This code is duplicated because extracting it away
+      // would make it non-inlineable.
+      if (result !== undefined && result !== null) {
+        addCatch(this, result, type, args);
+      }
+    }
+  }
+
+  return true;
+};
+
+// Core registration shared by addListener/prependListener. `events[type]` is
+// stored as a bare function for a single listener and upgraded to an array
+// only when a second listener arrives (single-listener fast path).
+function _addListener(target, type, listener, prepend) {
+  let m;
+  let events;
+  let existing;
+
+  checkListener(listener);
+
+  events = target._events;
+  if (events === undefined) {
+    events = target._events = { __proto__: null };
+    target._eventsCount = 0;
+  } else {
+    // To avoid recursion in the case that type === "newListener"! Before
+    // adding it to the listeners, first emit "newListener".
+    if (events.newListener !== undefined) {
+      target.emit("newListener", type, listener.listener ?? listener);
+
+      // Re-assign `events` because a newListener handler could have caused the
+      // this._events to be assigned to a new object
+      events = target._events;
+    }
+    existing = events[type];
+  }
+
+  if (existing === undefined) {
+    // Optimize the case of one listener. Don't need the extra array object.
+    events[type] = listener;
+    ++target._eventsCount;
+  } else {
+    if (typeof existing === "function") {
+      // Adding the second element, need to change to array.
+      existing = events[type] = prepend
+        ? [listener, existing]
+        : [existing, listener];
+      // If we've already got an array, just append.
+    } else if (prepend) {
+      existing.unshift(listener);
+    } else {
+      existing.push(listener);
+    }
+
+    // Check for listener leak
+    m = _getMaxListeners(target);
+    if (m > 0 && existing.length > m && !existing.warned) {
+      // `warned` is latched on the array so the warning fires at most once
+      // per event type (m === 0 means unlimited, see the guard above).
+      existing.warned = true;
+      // No error code for this since it is a Warning
+      const w = genericNodeError(
+        `Possible EventEmitter memory leak detected. ${
+          existing.length
+        } ${String(type)} listeners ` +
+          `added to ${inspect(target, {
+            depth: -1,
+          })}. MaxListeners is ${m}. Use emitter.setMaxListeners() to increase limit`,
+        {
+          name: "MaxListenersExceededWarning",
+          emitter: target,
+          type: type,
+          count: existing.length,
+        }
+      );
+      process.emitWarning(w);
+    }
+  }
+
+  return target;
+}
+
+/**
+ * Adds a listener to the event emitter.
+ * @param {string | symbol} type
+ * @param {Function} listener
+ * @returns {EventEmitter}
+ */
+EventEmitter.prototype.addListener = function addListener(type, listener) {
+  return _addListener(this, type, listener, false);
+};
+
+// `on` is the canonical public alias for addListener.
+EventEmitter.prototype.on = EventEmitter.prototype.addListener;
+
+/**
+ * Adds the `listener` function to the beginning of
+ * the listeners array.
+ * @param {string | symbol} type
+ * @param {Function} listener
+ * @returns {EventEmitter}
+ */
+EventEmitter.prototype.prependListener = function prependListener(
+  type,
+  listener
+) {
+  return _addListener(this, type, listener, true);
+};
+
+// Invoked with `this` bound to the state object built in _onceWrap. Removes
+// itself before calling the real listener so re-emission inside the listener
+// cannot trigger it a second time; `fired` guards against a retained wrapper.
+function onceWrapper() {
+  if (!this.fired) {
+    this.target.removeListener(this.type, this.wrapFn);
+    this.fired = true;
+    if (arguments.length === 0) return this.listener.call(this.target);
+    return ReflectApply(this.listener, this.target, arguments);
+  }
+}
+
+// Wraps `listener` for once()/prependOnceListener(). The original listener is
+// exposed as `.listener` on the wrapper so removeListener(type, original) and
+// listeners() can match/unwrap it.
+function _onceWrap(target, type, listener) {
+  const state = { fired: false, wrapFn: undefined, target, type, listener };
+  const wrapped = onceWrapper.bind(state);
+  wrapped.listener = listener;
+  state.wrapFn = wrapped;
+  return wrapped;
+}
+
+/**
+ * Adds a one-time `listener` function to the event emitter.
+ * @param {string | symbol} type
+ * @param {Function} listener
+ * @returns {EventEmitter}
+ */
+EventEmitter.prototype.once = function once(type, listener) {
+  checkListener(listener);
+
+  this.on(type, _onceWrap(this, type, listener));
+  return this;
+};
+
+/**
+ * Adds a one-time `listener` function to the beginning of
+ * the listeners array.
+ * @param {string | symbol} type
+ * @param {Function} listener
+ * @returns {EventEmitter}
+ */
+EventEmitter.prototype.prependOnceListener = function prependOnceListener(
+  type,
+  listener
+) {
+  checkListener(listener);
+
+  this.prependListener(type, _onceWrap(this, type, listener));
+  return this;
+};
+
+/**
+ * Removes the specified `listener` from the listeners array.
+ * Matches either the listener itself or a once() wrapper whose `.listener`
+ * is the given function.
+ * @param {string | symbol} type
+ * @param {Function} listener
+ * @returns {EventEmitter}
+ */
+EventEmitter.prototype.removeListener = function removeListener(
+  type,
+  listener
+) {
+  checkListener(listener);
+
+  const events = this._events;
+  if (events === undefined) return this;
+
+  const list = events[type];
+  if (list === undefined) return this;
+
+  // Single-listener fast path: events[type] is the bare function.
+  if (list === listener || list.listener === listener) {
+    this._eventsCount -= 1;
+
+    if (this[kShapeMode]) {
+      // Shape mode: assign undefined instead of `delete` to keep the
+      // object's hidden class stable.
+      events[type] = undefined;
+    } else if (this._eventsCount === 0) {
+      this._events = { __proto__: null };
+    } else {
+      delete events[type];
+      // NOTE(review): 'removeListener' is only re-emitted on this branch in
+      // the fast path — matches upstream events.js behavior.
+      if (events.removeListener)
+        this.emit("removeListener", type, list.listener || listener);
+    }
+  } else if (typeof list !== "function") {
+    let position = -1;
+
+    // Scan from the end so the most recently added matching listener wins.
+    for (let i = list.length - 1; i >= 0; i--) {
+      if (list[i] === listener || list[i].listener === listener) {
+        position = i;
+        break;
+      }
+    }
+
+    if (position < 0) return this;
+
+    if (position === 0) list.shift();
+    else {
+      spliceOne(list, position);
+    }
+
+    // Collapse a one-element array back to the bare-function fast path.
+    if (list.length === 1) events[type] = list[0];
+
+    if (events.removeListener !== undefined)
+      this.emit("removeListener", type, listener);
+  }
+
+  return this;
+};
+
+// `off` is the public alias for removeListener.
+EventEmitter.prototype.off = EventEmitter.prototype.removeListener;
+
+/**
+ * Removes all listeners from the event emitter. (Only
+ * removes listeners for a specific event name if specified
+ * as `type`).
+ * @param {string | symbol} [type]
+ * @returns {EventEmitter}
+ */
+EventEmitter.prototype.removeAllListeners = function removeAllListeners(type) {
+  const events = this._events;
+  if (events === undefined) return this;
+
+  // Not listening for removeListener, no need to emit
+  if (events.removeListener === undefined) {
+    if (arguments.length === 0) {
+      this._events = { __proto__: null };
+      this._eventsCount = 0;
+    } else if (events[type] !== undefined) {
+      if (--this._eventsCount === 0) this._events = { __proto__: null };
+      else delete events[type];
+    }
+    this[kShapeMode] = false;
+    return this;
+  }
+
+  // Emit removeListener for all listeners on all events
+  if (arguments.length === 0) {
+    // 'removeListener' listeners go last so they observe every other removal.
+    for (const key of ReflectOwnKeys(events)) {
+      if (key === "removeListener") continue;
+      this.removeAllListeners(key);
+    }
+    this.removeAllListeners("removeListener");
+    this._events = { __proto__: null };
+    this._eventsCount = 0;
+    this[kShapeMode] = false;
+    return this;
+  }
+
+  const listeners = events[type];
+
+  if (typeof listeners === "function") {
+    this.removeListener(type, listeners);
+  } else if (listeners !== undefined) {
+    // LIFO order
+    for (let i = listeners.length - 1; i >= 0; i--) {
+      this.removeListener(type, listeners[i]);
+    }
+  }
+
+  return this;
+};
+
+// Shared backing for listeners()/rawListeners(): returns a copy of the
+// listeners for `type`; with `unwrap`, once() wrappers are replaced by their
+// original listener functions.
+function _listeners(target, type, unwrap) {
+  const events = target._events;
+
+  if (events === undefined) return [];
+
+  const evlistener = events[type];
+  if (evlistener === undefined) return [];
+
+  if (typeof evlistener === "function")
+    return unwrap ? [evlistener.listener || evlistener] : [evlistener];
+
+  return unwrap ? unwrapListeners(evlistener) : arrayClone(evlistener);
+}
+
+/**
+ * Returns a copy of the array of listeners for the event name
+ * specified as `type`.
+ * @param {string | symbol} type
+ * @returns {Function[]}
+ */
+EventEmitter.prototype.listeners = function listeners(type) {
+  return _listeners(this, type, true);
+};
+
+/**
+ * Returns a copy of the array of listeners and wrappers for
+ * the event name specified as `type`.
+ * @param {string | symbol} type
+ * @returns {Function[]}
+ */
+EventEmitter.prototype.rawListeners = function rawListeners(type) {
+  return _listeners(this, type, false);
+};
+
+/**
+ * Returns the number of listeners listening to event name
+ * specified as `type`. With `listener`, counts only entries matching that
+ * function (directly or via a once() wrapper's `.listener`).
+ * @param {string | symbol} type
+ * @param {Function} [listener]
+ * @returns {number}
+ */
+EventEmitter.prototype.listenerCount = function listenerCount(type, listener) {
+  const events = this._events;
+
+  if (events !== undefined) {
+    const evlistener = events[type];
+
+    if (typeof evlistener === "function") {
+      if (listener != null) {
+        return listener === evlistener || listener === evlistener.listener
+          ? 1
+          : 0;
+      }
+
+      return 1;
+    } else if (evlistener !== undefined) {
+      if (listener != null) {
+        let matching = 0;
+
+        for (let i = 0, l = evlistener.length; i < l; i++) {
+          if (
+            evlistener[i] === listener ||
+            evlistener[i].listener === listener
+          ) {
+            matching++;
+          }
+        }
+
+        return matching;
+      }
+
+      return evlistener.length;
+    }
+  }
+
+  return 0;
+};
+
+/**
+ * Returns an array listing the events for which
+ * the emitter has registered listeners.
+ * @returns {(string | symbol)[]}
+ */
+EventEmitter.prototype.eventNames = function eventNames() {
+  return this._eventsCount > 0 ? ReflectOwnKeys(this._events) : [];
+};
+
+// Shallow-copies a listener array. The hand-unrolled small cases are a
+// deliberate hot-path optimization; do not "simplify" them away.
+function arrayClone(arr) {
+  // At least since V8 8.3, this implementation is faster than the previous
+  // which always used a simple for-loop
+  switch (arr.length) {
+    case 2:
+      return [arr[0], arr[1]];
+    case 3:
+      return [arr[0], arr[1], arr[2]];
+    case 4:
+      return [arr[0], arr[1], arr[2], arr[3]];
+    case 5:
+      return [arr[0], arr[1], arr[2], arr[3], arr[4]];
+    case 6:
+      return [arr[0], arr[1], arr[2], arr[3], arr[4], arr[5]];
+  }
+  return ArrayPrototypeSlice(arr);
+}
+
+// Copy of `arr` with once() wrappers replaced by their original listeners
+// (entries without a function `.listener` are kept as-is).
+function unwrapListeners(arr) {
+  const ret = arrayClone(arr);
+  for (let i = 0; i < ret.length; ++i) {
+    const orig = ret[i].listener;
+    if (typeof orig === "function") ret[i] = orig;
+  }
+  return ret;
+}
+
+/**
+ * Returns a copy of the array of listeners for the event name
+ * specified as `type`. Works for both EventEmitter (duck-typed via
+ * `.listeners`) and EventTarget (via its internal kEvents map).
+ * @param {EventEmitter | EventTarget} emitterOrTarget
+ * @param {string | symbol} type
+ * @returns {Function[]}
+ */
+function getEventListeners(emitterOrTarget, type) {
+  // First check if EventEmitter
+  if (typeof emitterOrTarget.listeners === "function") {
+    return emitterOrTarget.listeners(type);
+  }
+  // Require event target lazily to avoid always loading it
+  const { isEventTarget, kEvents } = require("internal/event_target");
+  if (isEventTarget(emitterOrTarget)) {
+    const root = emitterOrTarget[kEvents].get(type);
+    const listeners = [];
+    let handler = root?.next;
+    while (handler?.listener !== undefined) {
+      // Weak listeners are stored as WeakRefs; deref to the real function.
+      const listener = handler.listener?.deref
+        ? handler.listener.deref()
+        : handler.listener;
+      listeners.push(listener);
+      handler = handler.next;
+    }
+    return listeners;
+  }
+  throw new ERR_INVALID_ARG_TYPE(
+    "emitter",
+    ["EventEmitter", "EventTarget"],
+    emitterOrTarget
+  );
+}
+
+/**
+ * Returns the max listeners set.
+ * @param {EventEmitter | EventTarget} emitterOrTarget
+ * @returns {number}
+ */
+function getMaxListeners(emitterOrTarget) {
+  if (typeof emitterOrTarget?.getMaxListeners === "function") {
+    return _getMaxListeners(emitterOrTarget);
+  } else if (typeof emitterOrTarget?.[kMaxEventTargetListeners] === "number") {
+    return emitterOrTarget[kMaxEventTargetListeners];
+  }
+
+  throw new ERR_INVALID_ARG_TYPE(
+    "emitter",
+    ["EventEmitter", "EventTarget"],
+    emitterOrTarget
+  );
+}
+
+/**
+ * Returns the number of registered listeners for `type`.
+ * @param {EventEmitter | EventTarget} emitterOrTarget
+ * @param {string | symbol} type
+ * @returns {number}
+ */
+function listenerCount(emitterOrTarget, type) {
+  if (typeof emitterOrTarget.listenerCount === "function") {
+    return emitterOrTarget.listenerCount(type);
+  }
+  const { isEventTarget, kEvents } = require("internal/event_target");
+  if (isEventTarget(emitterOrTarget)) {
+    return emitterOrTarget[kEvents].get(type)?.size ?? 0;
+  }
+  throw new ERR_INVALID_ARG_TYPE(
+    "emitter",
+    ["EventEmitter", "EventTarget"],
+    emitterOrTarget
+  );
+}
+
+/**
+ * Creates a `Promise` that is fulfilled when the emitter
+ * emits the given event. Resolves with the array of emitted arguments;
+ * rejects on the emitter's 'error' event or when `options.signal` aborts.
+ * @param {EventEmitter} emitter
+ * @param {string | symbol} name
+ * @param {{ signal: AbortSignal; }} [options]
+ * @returns {Promise}
+ */
+async function once(emitter, name, options = kEmptyObject) {
+  validateObject(options, "options");
+  const { signal } = options;
+  validateAbortSignal(signal, "options.signal");
+  if (signal?.aborted)
+    throw new AbortError(undefined, { cause: signal.reason });
+  return new Promise((resolve, reject) => {
+    const errorListener = (err) => {
+      emitter.removeListener(name, resolver);
+      if (signal != null) {
+        eventTargetAgnosticRemoveListener(signal, "abort", abortListener);
+      }
+      reject(err);
+    };
+    const resolver = (...args) => {
+      if (typeof emitter.removeListener === "function") {
+        emitter.removeListener("error", errorListener);
+      }
+      if (signal != null) {
+        eventTargetAgnosticRemoveListener(signal, "abort", abortListener);
+      }
+      resolve(args);
+    };
+
+    kResistStopPropagation ??=
+      require("internal/event_target").kResistStopPropagation;
+    const opts = {
+      __proto__: null,
+      once: true,
+      [kResistStopPropagation]: true,
+    };
+    eventTargetAgnosticAddListener(emitter, name, resolver, opts);
+    if (name !== "error" && typeof emitter.once === "function") {
+      // EventTarget does not have `error` event semantics like Node
+      // EventEmitters, we listen to `error` events only on EventEmitters.
+      emitter.once("error", errorListener);
+    }
+    function abortListener() {
+      eventTargetAgnosticRemoveListener(emitter, name, resolver);
+      eventTargetAgnosticRemoveListener(emitter, "error", errorListener);
+      reject(new AbortError(undefined, { cause: signal?.reason }));
+    }
+    if (signal != null) {
+      eventTargetAgnosticAddListener(signal, "abort", abortListener, {
+        __proto__: null,
+        once: true,
+        [kResistStopPropagation]: true,
+      });
+    }
+  });
+}
+
+// Shapes one async-iterator step result: { value, done }.
+function createIterResult(value, done) {
+  return { value, done };
+}
+
+function eventTargetAgnosticRemoveListener(emitter, name, listener, flags) {
+ if (typeof emitter.removeListener === "function") {
+ emitter.removeListener(name, listener);
+ } else if (typeof emitter.removeEventListener === "function") {
+ emitter.removeEventListener(name, listener, flags);
+ } else {
+ throw new ERR_INVALID_ARG_TYPE("emitter", "EventEmitter", emitter);
+ }
+}
+
+function eventTargetAgnosticAddListener(emitter, name, listener, flags) {
+ if (typeof emitter.on === "function") {
+ if (flags?.once) {
+ emitter.once(name, listener);
+ } else {
+ emitter.on(name, listener);
+ }
+ } else if (typeof emitter.addEventListener === "function") {
+ emitter.addEventListener(name, listener, flags);
+ } else {
+ throw new ERR_INVALID_ARG_TYPE("emitter", "EventEmitter", emitter);
+ }
+}
+
+/**
+ * Returns an `AsyncIterator` that iterates `event` events.
+ * Buffers events that arrive before they are consumed and applies
+ * pause()/resume() backpressure on the emitter at the high/low watermarks.
+ * @param {EventEmitter} emitter
+ * @param {string | symbol} event
+ * @param {{
+ *   signal: AbortSignal;
+ *   close?: string[];
+ *   highWaterMark?: number,
+ *   lowWaterMark?: number
+ * }} [options]
+ * @returns {AsyncIterator}
+ */
+function on(emitter, event, options = kEmptyObject) {
+  // Parameters validation
+  validateObject(options, "options");
+  const signal = options.signal;
+  validateAbortSignal(signal, "options.signal");
+  if (signal?.aborted)
+    throw new AbortError(undefined, { cause: signal.reason });
+  // Support both highWaterMark and highWatermark for backward compatibility
+  const highWatermark =
+    options.highWaterMark ?? options.highWatermark ?? NumberMAX_SAFE_INTEGER;
+  validateInteger(highWatermark, "options.highWaterMark", 1);
+  // Support both lowWaterMark and lowWatermark for backward compatibility
+  const lowWatermark = options.lowWaterMark ?? options.lowWatermark ?? 1;
+  validateInteger(lowWatermark, "options.lowWaterMark", 1);
+
+  // Preparing controlling queues and variables
+  FixedQueue ??= require("internal/fixed_queue");
+  const unconsumedEvents = new FixedQueue();
+  const unconsumedPromises = new FixedQueue();
+  let paused = false;
+  let error = null;
+  let finished = false;
+  let size = 0;
+
+  const iterator = ObjectSetPrototypeOf(
+    {
+      next() {
+        // First, we consume all unread events
+        if (size) {
+          const value = unconsumedEvents.shift();
+          size--;
+          // Dropping below the low watermark lifts backpressure.
+          if (paused && size < lowWatermark) {
+            emitter.resume();
+            paused = false;
+          }
+          return PromiseResolve(createIterResult(value, false));
+        }
+
+        // Then we error, if an error happened
+        // This happens one time if at all, because after 'error'
+        // we stop listening
+        if (error) {
+          const p = PromiseReject(error);
+          // Only the first element errors
+          error = null;
+          return p;
+        }
+
+        // If the iterator is finished, resolve to done
+        if (finished) return closeHandler();
+
+        // Wait until an event happens
+        return new Promise(function (resolve, reject) {
+          unconsumedPromises.push({ resolve, reject });
+        });
+      },
+
+      return() {
+        return closeHandler();
+      },
+
+      throw(err) {
+        if (!err || !(err instanceof Error)) {
+          throw new ERR_INVALID_ARG_TYPE(
+            "EventEmitter.AsyncIterator",
+            "Error",
+            err
+          );
+        }
+        errorHandler(err);
+      },
+      [SymbolAsyncIterator]() {
+        return this;
+      },
+      [kWatermarkData]: {
+        /**
+         * The current queue size
+         * @returns {number}
+         */
+        get size() {
+          return size;
+        },
+        /**
+         * The low watermark. The emitter is resumed every time size is lower than it
+         * @returns {number}
+         */
+        get low() {
+          return lowWatermark;
+        },
+        /**
+         * The high watermark. The emitter is paused every time size is higher than it
+         * @returns {number}
+         */
+        get high() {
+          return highWatermark;
+        },
+        /**
+         * It checks whether the emitter is paused by the watermark controller or not
+         * @returns {boolean}
+         */
+        get isPaused() {
+          return paused;
+        },
+      },
+    },
+    AsyncIteratorPrototype
+  );
+
+  // Adding event handlers
+  const { addEventListener, removeAll } = listenersController();
+  kFirstEventParam ??= require("internal/events/symbols").kFirstEventParam;
+  addEventListener(
+    emitter,
+    event,
+    options[kFirstEventParam]
+      ? eventHandler
+      : function (...args) {
+          return eventHandler(args);
+        }
+  );
+  if (event !== "error" && typeof emitter.on === "function") {
+    addEventListener(emitter, "error", errorHandler);
+  }
+  const closeEvents = options?.close;
+  if (closeEvents?.length) {
+    for (let i = 0; i < closeEvents.length; i++) {
+      addEventListener(emitter, closeEvents[i], closeHandler);
+    }
+  }
+
+  const abortListenerDisposable = signal
+    ? addAbortListener(signal, abortListener)
+    : null;
+
+  return iterator;
+
+  function abortListener() {
+    errorHandler(new AbortError(undefined, { cause: signal?.reason }));
+  }
+
+  // Either satisfies a pending next() immediately or buffers the event,
+  // pausing the emitter once the buffer exceeds the high watermark.
+  function eventHandler(value) {
+    if (unconsumedPromises.isEmpty()) {
+      size++;
+      if (!paused && size > highWatermark) {
+        paused = true;
+        emitter.pause();
+      }
+      unconsumedEvents.push(value);
+    } else unconsumedPromises.shift().resolve(createIterResult(value, false));
+  }
+
+  function errorHandler(err) {
+    if (unconsumedPromises.isEmpty()) error = err;
+    else unconsumedPromises.shift().reject(err);
+
+    closeHandler();
+  }
+
+  // Tears down all listeners and settles any pending next() calls as done.
+  function closeHandler() {
+    abortListenerDisposable?.[SymbolDispose]();
+    removeAll();
+    finished = true;
+    const doneResult = createIterResult(undefined, true);
+    while (!unconsumedPromises.isEmpty()) {
+      unconsumedPromises.shift().resolve(doneResult);
+    }
+
+    return PromiseResolve(doneResult);
+  }
+}
+
+// Tracks every (emitter, event, handler, flags) registration made through it
+// so removeAll() can detach the whole set in one call (used by on() above).
+function listenersController() {
+  const listeners = [];
+
+  return {
+    addEventListener(emitter, event, handler, flags) {
+      eventTargetAgnosticAddListener(emitter, event, handler, flags);
+      ArrayPrototypePush(listeners, [emitter, event, handler, flags]);
+    },
+    removeAll() {
+      while (listeners.length > 0) {
+        ReflectApply(
+          eventTargetAgnosticRemoveListener,
+          undefined,
+          ArrayPrototypePop(listeners)
+        );
+      }
+    },
+  };
+}
diff --git a/.codesandbox/node/fs.js b/.codesandbox/node/fs.js
new file mode 100644
index 0000000..c505db7
--- /dev/null
+++ b/.codesandbox/node/fs.js
@@ -0,0 +1,3397 @@
+"use strict";
+
+const {
+ ArrayFromAsync,
+ ArrayPrototypePush,
+ BigIntPrototypeToString,
+ Boolean,
+ FunctionPrototypeCall,
+ MathMax,
+ Number,
+ ObjectDefineProperties,
+ ObjectDefineProperty,
+ Promise,
+ PromisePrototypeThen,
+ PromiseResolve,
+ ReflectApply,
+ SafeMap,
+ SafeSet,
+ StringPrototypeCharCodeAt,
+ StringPrototypeIndexOf,
+ StringPrototypeSlice,
+ SymbolDispose,
+ uncurryThis,
+} = primordials;
+
+const { fs: constants } = internalBinding("constants");
+const {
+ S_IFIFO,
+ S_IFLNK,
+ S_IFMT,
+ S_IFREG,
+ S_IFSOCK,
+ F_OK,
+ O_WRONLY,
+ O_SYMLINK,
+} = constants;
+
+const pathModule = require("path");
+const { isArrayBufferView } = require("internal/util/types");
+
+const binding = internalBinding("fs");
+
+const { createBlobFromFilePath } = require("internal/blob");
+
+const { Buffer } = require("buffer");
+const { isBuffer: BufferIsBuffer } = Buffer;
+const BufferToString = uncurryThis(Buffer.prototype.toString);
+const {
+ AbortError,
+ aggregateTwoErrors,
+ codes: { ERR_ACCESS_DENIED, ERR_FS_FILE_TOO_LARGE, ERR_INVALID_ARG_VALUE },
+} = require("internal/errors");
+
+const { FSReqCallback, statValues } = binding;
+const { toPathIfFileURL } = require("internal/url");
+const {
+ customPromisifyArgs: kCustomPromisifyArgsSymbol,
+ getLazy,
+ kEmptyObject,
+ promisify: { custom: kCustomPromisifiedSymbol },
+ SideEffectFreeRegExpPrototypeExec,
+ defineLazyProperties,
+ isWindows,
+ isMacOS,
+} = require("internal/util");
+const {
+ constants: { kIoMaxLength, kMaxUserId },
+ copyObject,
+ Dirent,
+ getDirent,
+ getDirents,
+ getOptions,
+ getValidatedFd,
+ getValidatedPath,
+ handleErrorFromBinding,
+ preprocessSymlinkDestination,
+ Stats,
+ getStatFsFromBinding,
+ getStatsFromBinding,
+ realpathCacheKey,
+ stringToFlags,
+ stringToSymlinkType,
+ toUnixTimestamp,
+ validateBufferArray,
+ validateCpOptions,
+ validateOffsetLengthRead,
+ validateOffsetLengthWrite,
+ validatePath,
+ validatePosition,
+ validateRmOptions,
+ validateRmOptionsSync,
+ validateRmdirOptions,
+ validateStringAfterArrayBufferView,
+ warnOnNonPortableTemplate,
+} = require("internal/fs/utils");
+const {
+ CHAR_FORWARD_SLASH,
+ CHAR_BACKWARD_SLASH,
+} = require("internal/constants");
+const {
+ isInt32,
+ parseFileMode,
+ validateBoolean,
+ validateBuffer,
+ validateEncoding,
+ validateFunction,
+ validateInteger,
+ validateObject,
+ validateOneOf,
+ validateString,
+ kValidateObjectAllowNullable,
+} = require("internal/validators");
+
+const permission = require("internal/process/permission");
+
+// Set at module bottom to the exported fs object; internal helpers call
+// through `fs.*` so that monkeypatched methods (e.g. by graceful-fs) are used.
+let fs;
+
+// Lazy loaded
+let cpFn;
+let cpSyncFn;
+let promises = null;
+let ReadStream;
+let WriteStream;
+let rimraf;
+let kResistStopPropagation;
+let ReadFileContext;
+
+// These have to be separate because of how graceful-fs happens to do its
+// monkeypatching.
+let FileReadStream;
+let FileWriteStream;
+let Utf8Stream;
+
+// Loads the fast UTF-8 stream implementation on first use so requiring `fs`
+// does not pay the cost up front.
+function lazyLoadUtf8Stream() {
+  Utf8Stream ??= require("internal/streams/fast-utf8-stream");
+}
+
+// Ensure that callbacks run in the global context. Only use this function
+// for callbacks that are passed to the binding layer, callbacks that are
+// invoked from JS already run in the proper scope.
+function makeCallback(cb) {
+  validateFunction(cb, "cb");
+
+  // Arrow function on purpose: `this` is the module scope, not the
+  // FSReqCallback the binding invokes the handler on.
+  return (...args) => ReflectApply(cb, this, args);
+}
+
+// Special case of `makeCallback()` that is specific to async `*stat()` calls as
+// an optimization, since the data passed back to the callback needs to be
+// transformed anyway.
+function makeStatsCallback(cb) {
+ validateFunction(cb, "cb");
+
+ return (err, stats) => {
+ if (err) return cb(err);
+ cb(err, getStatsFromBinding(stats));
+ };
+}
+
+const isFd = isInt32;
+
+function isFileType(stats, fileType) {
+ // Use stats array directly to avoid creating an fs.Stats instance just for
+ // our internal use.
+ let mode = stats[1];
+ if (typeof mode === "bigint") mode = Number(mode);
+ return (mode & S_IFMT) === fileType;
+}
+
+/**
+ * Tests a user's permissions for the file or directory
+ * specified by `path`.
+ * @param {string | Buffer | URL} path
+ * @param {number} [mode]
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function access(path, mode, callback) {
+ if (typeof mode === "function") {
+ callback = mode;
+ mode = F_OK;
+ }
+
+ path = getValidatedPath(path);
+ callback = makeCallback(callback);
+
+ const req = new FSReqCallback();
+ req.oncomplete = callback;
+ binding.access(path, mode, req);
+}
+
+/**
+ * Synchronously tests a user's permissions for the file or
+ * directory specified by `path`.
+ * @param {string | Buffer | URL} path
+ * @param {number} [mode]
+ * @returns {void}
+ */
+function accessSync(path, mode) {
+ binding.access(getValidatedPath(path), mode);
+}
+
+/**
+ * Tests whether or not the given path exists.
+ * @param {string | Buffer | URL} path
+ * @param {(exists?: boolean) => any} callback
+ * @returns {void}
+ */
+function exists(path, callback) {
+  validateFunction(callback, "cb");
+
+  // Collapse any access() error into `false`; exists() never reports errors.
+  function suppressedCallback(err) {
+    callback(!err);
+  }
+
+  try {
+    fs.access(path, F_OK, suppressedCallback);
+  } catch {
+    // access() can throw synchronously on invalid arguments; report `false`.
+    return callback(false);
+  }
+}
+
+// When promisified, resolve with the boolean directly instead of the usual
+// (err, result) convention.
+ObjectDefineProperty(exists, kCustomPromisifiedSymbol, {
+  __proto__: null,
+  value: function exists(path) {
+    // eslint-disable-line func-name-matching
+    return new Promise((resolve) => fs.exists(path, resolve));
+  },
+});
+
+// One-shot flag so the DEP0187 warning below is emitted at most once.
+let showExistsDeprecation = true;
+/**
+ * Synchronously tests whether or not the given path exists.
+ * @param {string | Buffer | URL} path
+ * @returns {boolean}
+ */
+function existsSync(path) {
+  try {
+    path = getValidatedPath(path);
+  } catch (err) {
+    // Historically existsSync() never threw on bad input; warn once and
+    // report the path as missing instead.
+    if (showExistsDeprecation && err?.code === "ERR_INVALID_ARG_TYPE") {
+      process.emitWarning(
+        "Passing invalid argument types to fs.existsSync is deprecated",
+        "DeprecationWarning",
+        "DEP0187"
+      );
+      showExistsDeprecation = false;
+    }
+    return false;
+  }
+
+  return binding.existsSync(path);
+}
+
+// open() completion handler for fs.readFile(): stash the fd on the context
+// and issue an fstat() to learn the file size. `this` is the FSReqCallback.
+function readFileAfterOpen(err, fd) {
+  const context = this.context;
+
+  if (err) {
+    context.callback(err);
+    return;
+  }
+
+  context.fd = fd;
+
+  const req = new FSReqCallback();
+  req.oncomplete = readFileAfterStat;
+  req.context = context;
+  binding.fstat(fd, false, req);
+}
+
+// fstat() completion handler for fs.readFile(): size the destination buffer
+// (or fall back to chunked reads when the size is unknown) and start reading.
+function readFileAfterStat(err, stats) {
+  const context = this.context;
+
+  if (err) return context.close(err);
+
+  // TODO(BridgeAR): Check if allocating a smaller chunk is better performance
+  // wise, similar to the promise based version (less peak memory and chunked
+  // stringify operations vs multiple C++/JS boundary crossings).
+  // Non-regular files report a size of 0, which means "unknown" below.
+  const size = (context.size = isFileType(stats, S_IFREG) ? stats[8] : 0);
+
+  if (size > kIoMaxLength) {
+    err = new ERR_FS_FILE_TOO_LARGE(size);
+    return context.close(err);
+  }
+
+  try {
+    if (size === 0) {
+      // TODO(BridgeAR): If an encoding is set, use the StringDecoder to concat
+      // the result and reuse the buffer instead of allocating a new one.
+      context.buffers = [];
+    } else {
+      context.buffer = Buffer.allocUnsafeSlow(size);
+    }
+  } catch (err) {
+    // Allocation failure (or the throw above): close the fd and report.
+    return context.close(err);
+  }
+  context.read();
+}
+
+function checkAborted(signal, callback) {
+ if (signal?.aborted) {
+ callback(new AbortError(undefined, { cause: signal.reason }));
+ return true;
+ }
+ return false;
+}
+
+/**
+ * Asynchronously reads the entire contents of a file.
+ * @param {string | Buffer | URL | number} path
+ * @param {{
+ *   encoding?: string | null;
+ *   flag?: string;
+ *   signal?: AbortSignal;
+ *   } | string} [options]
+ * @param {(
+ *   err?: Error,
+ *   data?: string | Buffer
+ *   ) => any} callback
+ * @returns {void}
+ */
+function readFile(path, options, callback) {
+  // readFile(path, cb) form: the options slot actually holds the callback.
+  callback ||= options;
+  validateFunction(callback, "cb");
+  options = getOptions(options, { flag: "r" });
+  ReadFileContext ??= require("internal/fs/read/context");
+  const context = new ReadFileContext(callback, options.encoding);
+  context.isUserFd = isFd(path); // File descriptor ownership
+
+  if (options.signal) {
+    context.signal = options.signal;
+  }
+  if (context.isUserFd) {
+    // Caller owns the fd: skip open() and jump straight to the post-open
+    // handler on the next tick so the API stays asynchronous.
+    process.nextTick(function tick(context) {
+      FunctionPrototypeCall(readFileAfterOpen, { context }, null, path);
+    }, context);
+    return;
+  }
+
+  if (checkAborted(options.signal, callback)) return;
+
+  const flagsNumber = stringToFlags(options.flag, "options.flag");
+  const req = new FSReqCallback();
+  req.context = context;
+  req.oncomplete = readFileAfterOpen;
+  binding.open(getValidatedPath(path), flagsNumber, 0o666, req);
+}
+
+// fstat() for readFileSync(): the binding is asked not to throw and returns
+// `undefined` on failure, in which case fds we own are closed here.
+function tryStatSync(fd, isUserFd) {
+  const stats = binding.fstat(fd, false, undefined, true /* shouldNotThrow */);
+  if (stats === undefined && !isUserFd) {
+    fs.closeSync(fd);
+  }
+  return stats;
+}
+
+function tryCreateBuffer(size, fd, isUserFd) {
+ let threw = true;
+ let buffer;
+ try {
+ if (size > kIoMaxLength) {
+ throw new ERR_FS_FILE_TOO_LARGE(size);
+ }
+ buffer = Buffer.allocUnsafe(size);
+ threw = false;
+ } finally {
+ if (threw && !isUserFd) fs.closeSync(fd);
+ }
+ return buffer;
+}
+
+function tryReadSync(fd, isUserFd, buffer, pos, len) {
+ let threw = true;
+ let bytesRead;
+ try {
+ bytesRead = fs.readSync(fd, buffer, pos, len);
+ threw = false;
+ } finally {
+ if (threw && !isUserFd) fs.closeSync(fd);
+ }
+ return bytesRead;
+}
+
+/**
+ * Synchronously reads the entire contents of a file.
+ * @param {string | Buffer | URL | number} path
+ * @param {{
+ *   encoding?: string | null;
+ *   flag?: string;
+ *   }} [options]
+ * @returns {string | Buffer}
+ */
+function readFileSync(path, options) {
+  options = getOptions(options, { flag: "r" });
+
+  // Fast path: the binding decodes UTF-8 directly, skipping the intermediate
+  // Buffer below.
+  if (options.encoding === "utf8" || options.encoding === "utf-8") {
+    if (!isInt32(path)) {
+      path = getValidatedPath(path);
+    }
+    return binding.readFileUtf8(path, stringToFlags(options.flag));
+  }
+
+  const isUserFd = isFd(path); // File descriptor ownership
+  const fd = isUserFd ? path : fs.openSync(path, options.flag, 0o666);
+
+  const stats = tryStatSync(fd, isUserFd);
+  // Size 0 means "unknown size"; we fall back to chunked reads below.
+  const size = isFileType(stats, S_IFREG) ? stats[8] : 0;
+  let pos = 0;
+  let buffer; // Single buffer with file data
+  let buffers; // List for when size is unknown
+
+  if (size === 0) {
+    buffers = [];
+  } else {
+    buffer = tryCreateBuffer(size, fd, isUserFd);
+  }
+
+  let bytesRead;
+
+  if (size !== 0) {
+    // Known size: fill the single buffer, looping for short reads.
+    do {
+      bytesRead = tryReadSync(fd, isUserFd, buffer, pos, size - pos);
+      pos += bytesRead;
+    } while (bytesRead !== 0 && pos < size);
+  } else {
+    do {
+      // The kernel lies about many files.
+      // Go ahead and try to read some bytes.
+      buffer = Buffer.allocUnsafe(8192);
+      bytesRead = tryReadSync(fd, isUserFd, buffer, 0, 8192);
+      if (bytesRead !== 0) {
+        ArrayPrototypePush(buffers, buffer.slice(0, bytesRead));
+      }
+      pos += bytesRead;
+    } while (bytesRead !== 0);
+  }
+
+  if (!isUserFd) fs.closeSync(fd);
+
+  if (size === 0) {
+    // Data was collected into the buffers list.
+    buffer = Buffer.concat(buffers, pos);
+  } else if (pos < size) {
+    // The file shrank while reading; trim to what was actually read.
+    buffer = buffer.slice(0, pos);
+  }
+
+  if (options.encoding) buffer = buffer.toString(options.encoding);
+  return buffer;
+}
+
+function defaultCloseCallback(err) {
+ if (err != null) throw err;
+}
+
+/**
+ * Closes the file descriptor.
+ * @param {number} fd
+ * @param {(err?: Error) => any} [callback]
+ * @returns {void}
+ */
+function close(fd, callback = defaultCloseCallback) {
+ if (callback !== defaultCloseCallback) callback = makeCallback(callback);
+
+ const req = new FSReqCallback();
+ req.oncomplete = callback;
+ binding.close(fd, req);
+}
+
+/**
+ * Synchronously closes the file descriptor.
+ * @param {number} fd
+ * @returns {void}
+ */
+function closeSync(fd) {
+  // Any failure surfaces from the binding call itself.
+  binding.close(fd);
+}
+
+/**
+ * Asynchronously opens a file.
+ * @param {string | Buffer | URL} path
+ * @param {string | number} [flags]
+ * @param {string | number} [mode]
+ * @param {(
+ *   err?: Error,
+ *   fd?: number
+ *   ) => any} callback
+ * @returns {void}
+ */
+function open(path, flags, mode, callback) {
+  path = getValidatedPath(path);
+  // Arity juggling: open(path, cb) and open(path, flags, cb) are supported.
+  if (arguments.length < 3) {
+    callback = flags;
+    flags = "r";
+    mode = 0o666;
+  } else if (typeof mode === "function") {
+    callback = mode;
+    mode = 0o666;
+  } else {
+    mode = parseFileMode(mode, "mode", 0o666);
+  }
+  const flagsNumber = stringToFlags(flags);
+  callback = makeCallback(callback);
+
+  const req = new FSReqCallback();
+  req.oncomplete = callback;
+
+  binding.open(path, flagsNumber, mode, req);
+}
+
+/**
+ * Synchronously opens a file.
+ * @param {string | Buffer | URL} path
+ * @param {string | number} [flags]
+ * @param {string | number} [mode]
+ * @returns {number}
+ */
+function openSync(path, flags, mode) {
+ return binding.open(
+ getValidatedPath(path),
+ stringToFlags(flags),
+ parseFileMode(mode, "mode", 0o666)
+ );
+}
+
+/**
+ * @param {string | Buffer | URL } path
+ * @param {{
+ * type?: string;
+ * }} [options]
+ * @returns {Promise}
+ */
+function openAsBlob(path, options = kEmptyObject) {
+ validateObject(options, "options");
+ const type = options.type || "";
+ validateString(type, "options.type");
+ // The underlying implementation here returns the Blob synchronously for now.
+ // To give ourselves flexibility to maybe return the Blob asynchronously,
+ // this API returns a Promise.
+ path = getValidatedPath(path);
+ return PromiseResolve(createBlobFromFilePath(path, { type }));
+}
+
+/**
+ * Reads file from the specified `fd` (file descriptor).
+ * @param {number} fd
+ * @param {Buffer | TypedArray | DataView} buffer
+ * @param {number | {
+ *   offset?: number;
+ *   length?: number;
+ *   position?: number | bigint | null;
+ *   }} [offsetOrOptions]
+ * @param {number} length
+ * @param {number | bigint | null} position
+ * @param {(
+ *   err?: Error,
+ *   bytesRead?: number,
+ *   buffer?: Buffer
+ *   ) => any} callback
+ * @returns {void}
+ */
+function read(fd, buffer, offsetOrOptions, length, position, callback) {
+  fd = getValidatedFd(fd);
+
+  let offset = offsetOrOptions;
+  let params = null;
+  // Disambiguate the overloaded call forms by arity.
+  if (arguments.length <= 4) {
+    if (arguments.length === 4) {
+      // This is fs.read(fd, buffer, options, callback)
+      validateObject(offsetOrOptions, "options", kValidateObjectAllowNullable);
+      callback = length;
+      params = offsetOrOptions;
+    } else if (arguments.length === 3) {
+      // This is fs.read(fd, bufferOrParams, callback)
+      if (!isArrayBufferView(buffer)) {
+        // This is fs.read(fd, params, callback)
+        params = buffer;
+        ({ buffer = Buffer.alloc(16384) } = params ?? kEmptyObject);
+      }
+      callback = offsetOrOptions;
+    } else {
+      // This is fs.read(fd, callback)
+      callback = buffer;
+      buffer = Buffer.alloc(16384);
+    }
+
+    if (params !== undefined) {
+      validateObject(params, "options", kValidateObjectAllowNullable);
+    }
+    ({
+      offset = 0,
+      length = buffer?.byteLength - offset,
+      position = null,
+    } = params ?? kEmptyObject);
+  }
+
+  validateBuffer(buffer);
+  validateFunction(callback, "cb");
+
+  if (offset == null) {
+    offset = 0;
+  } else {
+    validateInteger(offset, "offset", 0);
+  }
+
+  // Coerce to int32; non-numeric lengths become 0 and short-circuit below.
+  length |= 0;
+
+  if (length === 0) {
+    return process.nextTick(function tick() {
+      callback(null, 0, buffer);
+    });
+  }
+
+  if (buffer.byteLength === 0) {
+    throw new ERR_INVALID_ARG_VALUE(
+      "buffer",
+      buffer,
+      "is empty and cannot be written"
+    );
+  }
+
+  validateOffsetLengthRead(offset, length, buffer.byteLength);
+
+  // -1 tells the binding to read from the current file position.
+  if (position == null) {
+    position = -1;
+  } else {
+    validatePosition(position, "position", length);
+  }
+
+  function wrapper(err, bytesRead) {
+    // Retain a reference to buffer so that it can't be GC'ed too soon.
+    callback(err, bytesRead || 0, buffer);
+  }
+
+  const req = new FSReqCallback();
+  req.oncomplete = wrapper;
+
+  binding.read(fd, buffer, offset, length, position, req);
+}
+
+// Promisified fs.read() resolves with { bytesRead, buffer }.
+ObjectDefineProperty(read, kCustomPromisifyArgsSymbol, {
+  __proto__: null,
+  value: ["bytesRead", "buffer"],
+  enumerable: false,
+});
+
+/**
+ * Synchronously reads the file from the
+ * specified `fd` (file descriptor).
+ * @param {number} fd
+ * @param {Buffer | TypedArray | DataView} buffer
+ * @param {number | {
+ *   offset?: number;
+ *   length?: number;
+ *   position?: number | bigint | null;
+ *   }} [offsetOrOptions]
+ * @param {number} [length]
+ * @param {number} [position]
+ * @returns {number}
+ */
+function readSync(fd, buffer, offsetOrOptions, length, position) {
+  fd = getValidatedFd(fd);
+
+  validateBuffer(buffer);
+
+  let offset = offsetOrOptions;
+  // readSync(fd, buffer[, options]) form.
+  if (arguments.length <= 3 || typeof offsetOrOptions === "object") {
+    if (offsetOrOptions !== undefined) {
+      validateObject(offsetOrOptions, "options", kValidateObjectAllowNullable);
+    }
+
+    ({
+      offset = 0,
+      length = buffer.byteLength - offset,
+      position = null,
+    } = offsetOrOptions ?? kEmptyObject);
+  }
+
+  if (offset === undefined) {
+    offset = 0;
+  } else {
+    validateInteger(offset, "offset", 0);
+  }
+
+  // Coerce to int32; a zero length is a no-op read.
+  length |= 0;
+
+  if (length === 0) {
+    return 0;
+  }
+
+  if (buffer.byteLength === 0) {
+    throw new ERR_INVALID_ARG_VALUE(
+      "buffer",
+      buffer,
+      "is empty and cannot be written"
+    );
+  }
+
+  validateOffsetLengthRead(offset, length, buffer.byteLength);
+
+  // -1 tells the binding to read from the current file position.
+  if (position == null) {
+    position = -1;
+  } else {
+    validatePosition(position, "position", length);
+  }
+
+  return binding.read(fd, buffer, offset, length, position);
+}
+
+/**
+ * Reads file from the specified `fd` (file descriptor)
+ * and writes to an array of `ArrayBufferView`s.
+ * @param {number} fd
+ * @param {ArrayBufferView[]} buffers
+ * @param {number | null} [position]
+ * @param {(
+ * err?: Error,
+ * bytesRead?: number,
+ * buffers?: ArrayBufferView[]
+ * ) => any} callback
+ * @returns {void}
+ */
+function readv(fd, buffers, position, callback) {
+ function wrapper(err, read) {
+ callback(err, read || 0, buffers);
+ }
+
+ fd = getValidatedFd(fd);
+ validateBufferArray(buffers);
+ callback ||= position;
+ validateFunction(callback, "cb");
+
+ const req = new FSReqCallback();
+ req.oncomplete = wrapper;
+
+ if (typeof position !== "number") position = null;
+
+ binding.readBuffers(fd, buffers, position, req);
+}
+
+ObjectDefineProperty(readv, kCustomPromisifyArgsSymbol, {
+ __proto__: null,
+ value: ["bytesRead", "buffers"],
+ enumerable: false,
+});
+
+/**
+ * Synchronously reads file from the
+ * specified `fd` (file descriptor) and writes to an array
+ * of `ArrayBufferView`s.
+ * @param {number} fd
+ * @param {ArrayBufferView[]} buffers
+ * @param {number | null} [position]
+ * @returns {number}
+ */
+function readvSync(fd, buffers, position) {
+ fd = getValidatedFd(fd);
+ validateBufferArray(buffers);
+
+ if (typeof position !== "number") position = null;
+
+ return binding.readBuffers(fd, buffers, position);
+}
+
+/**
+ * Writes `buffer` to the specified `fd` (file descriptor).
+ * @param {number} fd
+ * @param {Buffer | TypedArray | DataView | string} buffer
+ * @param {number | object} [offsetOrOptions]
+ * @param {number} [length]
+ * @param {number | null} [position]
+ * @param {(
+ *   err?: Error,
+ *   bytesWritten?: number,
+ *   buffer?: Buffer | TypedArray | DataView
+ *   ) => any} callback
+ * @returns {void}
+ */
+function write(fd, buffer, offsetOrOptions, length, position, callback) {
+  function wrapper(err, written) {
+    // Retain a reference to buffer so that it can't be GC'ed too soon.
+    callback(err, written || 0, buffer);
+  }
+
+  fd = getValidatedFd(fd);
+
+  let offset = offsetOrOptions;
+  if (isArrayBufferView(buffer)) {
+    // write(fd, buffer[, offset[, length[, position]]], callback): the
+    // callback is the last argument actually supplied.
+    callback ||= position || length || offset;
+    validateFunction(callback, "cb");
+
+    if (typeof offset === "object") {
+      ({
+        offset = 0,
+        length = buffer.byteLength - offset,
+        position = null,
+      } = offsetOrOptions ?? kEmptyObject);
+    }
+
+    if (offset == null || typeof offset === "function") {
+      offset = 0;
+    } else {
+      validateInteger(offset, "offset", 0);
+    }
+    if (typeof length !== "number") length = buffer.byteLength - offset;
+    if (typeof position !== "number") position = null;
+    validateOffsetLengthWrite(offset, length, buffer.byteLength);
+
+    const req = new FSReqCallback();
+    req.oncomplete = wrapper;
+    binding.writeBuffer(fd, buffer, offset, length, position, req);
+    return;
+  }
+
+  validateStringAfterArrayBufferView(buffer, "buffer");
+
+  // write(fd, string[, position[, encoding]], callback): shuffle arguments so
+  // that `position` ends up holding the callback and `length` the encoding.
+  if (typeof position !== "function") {
+    if (typeof offset === "function") {
+      position = offset;
+      offset = null;
+    } else {
+      position = length;
+    }
+    length = "utf8";
+  }
+
+  const str = buffer;
+  validateEncoding(str, length);
+  callback = position;
+  validateFunction(callback, "cb");
+
+  const req = new FSReqCallback();
+  req.oncomplete = wrapper;
+  binding.writeString(fd, str, offset, length, req);
+}
+
+// Promisified fs.write() resolves with { bytesWritten, buffer }.
+ObjectDefineProperty(write, kCustomPromisifyArgsSymbol, {
+  __proto__: null,
+  value: ["bytesWritten", "buffer"],
+  enumerable: false,
+});
+
+/**
+ * Synchronously writes `buffer` to the
+ * specified `fd` (file descriptor).
+ * @param {number} fd
+ * @param {Buffer | TypedArray | DataView | string} buffer
+ * @param {{
+ *   offset?: number;
+ *   length?: number;
+ *   position?: number | null;
+ *   }} [offsetOrOptions]
+ * @param {number} [length]
+ * @param {number} [position]
+ * @returns {number}
+ */
+function writeSync(fd, buffer, offsetOrOptions, length, position) {
+  fd = getValidatedFd(fd);
+  // Legacy error-context object consumed by handleErrorFromBinding() below.
+  const ctx = {};
+  let result;
+
+  let offset = offsetOrOptions;
+  if (isArrayBufferView(buffer)) {
+    // writeSync(fd, buffer[, options]) form.
+    if (typeof offset === "object") {
+      ({
+        offset = 0,
+        length = buffer.byteLength - offset,
+        position = null,
+      } = offsetOrOptions ?? kEmptyObject);
+    }
+    if (position === undefined) position = null;
+    if (offset == null) {
+      offset = 0;
+    } else {
+      validateInteger(offset, "offset", 0);
+    }
+    if (typeof length !== "number") length = buffer.byteLength - offset;
+    validateOffsetLengthWrite(offset, length, buffer.byteLength);
+    result = binding.writeBuffer(
+      fd,
+      buffer,
+      offset,
+      length,
+      position,
+      undefined,
+      ctx
+    );
+  } else {
+    // writeSync(fd, string[, position[, encoding]]) form: here `offset`
+    // holds the position and `length` holds the encoding.
+    validateStringAfterArrayBufferView(buffer, "buffer");
+    validateEncoding(buffer, length);
+
+    if (offset === undefined) offset = null;
+    result = binding.writeString(fd, buffer, offset, length, undefined, ctx);
+  }
+  handleErrorFromBinding(ctx);
+  return result;
+}
+
+/**
+ * Writes an array of `ArrayBufferView`s to the
+ * specified `fd` (file descriptor).
+ * @param {number} fd
+ * @param {ArrayBufferView[]} buffers
+ * @param {number | null} [position]
+ * @param {(
+ * err?: Error,
+ * bytesWritten?: number,
+ * buffers?: ArrayBufferView[]
+ * ) => any} callback
+ * @returns {void}
+ */
+function writev(fd, buffers, position, callback) {
+ function wrapper(err, written) {
+ callback(err, written || 0, buffers);
+ }
+
+ fd = getValidatedFd(fd);
+ validateBufferArray(buffers);
+ callback ||= position;
+ validateFunction(callback, "cb");
+
+ if (buffers.length === 0) {
+ process.nextTick(callback, null, 0, buffers);
+ return;
+ }
+
+ const req = new FSReqCallback();
+ req.oncomplete = wrapper;
+
+ if (typeof position !== "number") position = null;
+
+ binding.writeBuffers(fd, buffers, position, req);
+}
+
+ObjectDefineProperty(writev, kCustomPromisifyArgsSymbol, {
+ __proto__: null,
+ value: ["bytesWritten", "buffer"],
+ enumerable: false,
+});
+
+/**
+ * Synchronously writes an array of `ArrayBufferView`s
+ * to the specified `fd` (file descriptor).
+ * @param {number} fd
+ * @param {ArrayBufferView[]} buffers
+ * @param {number | null} [position]
+ * @returns {number}
+ */
+function writevSync(fd, buffers, position) {
+ fd = getValidatedFd(fd);
+ validateBufferArray(buffers);
+
+ if (buffers.length === 0) {
+ return 0;
+ }
+
+ if (typeof position !== "number") position = null;
+
+ return binding.writeBuffers(fd, buffers, position);
+}
+
+/**
+ * Asynchronously renames file at `oldPath` to
+ * the pathname provided as `newPath`.
+ * @param {string | Buffer | URL} oldPath
+ * @param {string | Buffer | URL} newPath
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function rename(oldPath, newPath, callback) {
+ callback = makeCallback(callback);
+ const req = new FSReqCallback();
+ req.oncomplete = callback;
+ binding.rename(
+ getValidatedPath(oldPath, "oldPath"),
+ getValidatedPath(newPath, "newPath"),
+ req
+ );
+}
+
+/**
+ * Synchronously renames file at `oldPath` to
+ * the pathname provided as `newPath`.
+ * @param {string | Buffer | URL} oldPath
+ * @param {string | Buffer | URL} newPath
+ * @returns {void}
+ */
+function renameSync(oldPath, newPath) {
+ binding.rename(
+ getValidatedPath(oldPath, "oldPath"),
+ getValidatedPath(newPath, "newPath")
+ );
+}
+
+/**
+ * Truncates the file.
+ * @param {string | Buffer | URL} path
+ * @param {number} [len]
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function truncate(path, len, callback) {
+  // truncate(path, cb) form.
+  if (typeof len === "function") {
+    callback = len;
+    len = 0;
+  } else if (len === undefined) {
+    len = 0;
+  }
+
+  validateInteger(len, "len");
+  len = MathMax(0, len);
+  validateFunction(callback, "cb");
+  // Implemented as open + ftruncate + close; a close() error is aggregated
+  // with the truncate error so neither is silently dropped.
+  fs.open(path, "r+", (er, fd) => {
+    if (er) return callback(er);
+    const req = new FSReqCallback();
+    req.oncomplete = function oncomplete(er) {
+      fs.close(fd, (er2) => {
+        callback(aggregateTwoErrors(er2, er));
+      });
+    };
+    binding.ftruncate(fd, len, req);
+  });
+}
+
+/**
+ * Synchronously truncates the file.
+ * @param {string | Buffer | URL} path
+ * @param {number} [len]
+ * @returns {void}
+ */
+function truncateSync(path, len) {
+  if (len === undefined) {
+    len = 0;
+  }
+  // Allow error to be thrown, but still close fd.
+  // `len` validation is delegated to ftruncateSync().
+  const fd = fs.openSync(path, "r+");
+  try {
+    fs.ftruncateSync(fd, len);
+  } finally {
+    fs.closeSync(fd);
+  }
+}
+
+/**
+ * Truncates the file descriptor.
+ * @param {number} fd
+ * @param {number} [len]
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function ftruncate(fd, len = 0, callback) {
+ if (typeof len === "function") {
+ callback = len;
+ len = 0;
+ }
+ validateInteger(len, "len");
+ len = MathMax(0, len);
+ callback = makeCallback(callback);
+
+ const req = new FSReqCallback();
+ req.oncomplete = callback;
+ binding.ftruncate(fd, len, req);
+}
+
+/**
+ * Synchronously truncates the file descriptor.
+ * @param {number} fd
+ * @param {number} [len]
+ * @returns {void}
+ */
+function ftruncateSync(fd, len = 0) {
+ validateInteger(len, "len");
+ binding.ftruncate(fd, len < 0 ? 0 : len);
+}
+
+// Lazily loads the cp()/cpSync() implementations; cpFn is callbackified once
+// so the callback-based API can reuse the promise-based implementation.
+function lazyLoadCp() {
+  if (cpFn === undefined) {
+    ({ cpFn } = require("internal/fs/cp/cp"));
+    cpFn = require("util").callbackify(cpFn);
+    ({ cpSyncFn } = require("internal/fs/cp/cp-sync"));
+  }
+}
+
+function lazyLoadRimraf() {
+ if (rimraf === undefined) ({ rimraf } = require("internal/fs/rimraf"));
+}
+
+/**
+ * Asynchronously removes a directory.
+ * @param {string | Buffer | URL} path
+ * @param {object} [options]
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function rmdir(path, options, callback) {
+  // rmdir(path, cb) form.
+  if (typeof options === "function") {
+    callback = options;
+    options = undefined;
+  }
+
+  if (options?.recursive !== undefined) {
+    // This API previously accepted a `recursive` option that was deprecated
+    // and removed. However, in order to make the change more visible, we
+    // opted to throw an error if recursive is specified rather than removing it
+    // entirely.
+    throw new ERR_INVALID_ARG_VALUE(
+      "options.recursive",
+      options.recursive,
+      "is no longer supported"
+    );
+  }
+
+  callback = makeCallback(callback);
+  path = getValidatedPath(path);
+
+  validateRmdirOptions(options);
+  const req = new FSReqCallback();
+  req.oncomplete = callback;
+  binding.rmdir(path, req);
+}
+
+/**
+ * Synchronously removes a directory.
+ * @param {string | Buffer | URL} path
+ * @param {object} [options]
+ * @returns {void}
+ */
+function rmdirSync(path, options) {
+ path = getValidatedPath(path);
+
+ if (options?.recursive !== undefined) {
+ throw new ERR_INVALID_ARG_VALUE(
+ "options.recursive",
+ options.recursive,
+ "is no longer supported"
+ );
+ }
+
+ validateRmdirOptions(options);
+ binding.rmdir(path);
+}
+
+/**
+ * Asynchronously removes files and
+ * directories (modeled on the standard POSIX `rm` utility).
+ * @param {string | Buffer | URL} path
+ * @param {{
+ *   force?: boolean;
+ *   maxRetries?: number;
+ *   recursive?: boolean;
+ *   retryDelay?: number;
+ *   }} [options]
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function rm(path, options, callback) {
+  // rm(path, cb) form.
+  if (typeof options === "function") {
+    callback = options;
+    options = undefined;
+  }
+  path = getValidatedPath(path);
+
+  // validateRmOptions() is callback-based; it re-invokes us asynchronously
+  // with the normalized options.
+  validateRmOptions(path, options, false, (err, options) => {
+    if (err) {
+      return callback(err);
+    }
+    lazyLoadRimraf();
+    return rimraf(path, options, callback);
+  });
+}
+
+/**
+ * Synchronously removes files and
+ * directories (modeled on the standard POSIX `rm` utility).
+ * @param {string | Buffer | URL} path
+ * @param {{
+ * force?: boolean;
+ * maxRetries?: number;
+ * recursive?: boolean;
+ * retryDelay?: number;
+ * }} [options]
+ * @returns {void}
+ */
+function rmSync(path, options) {
+ const opts = validateRmOptionsSync(path, options, false);
+ return binding.rmSync(
+ getValidatedPath(path),
+ opts.maxRetries,
+ opts.recursive,
+ opts.retryDelay
+ );
+}
+
+/**
+ * Forces all currently queued I/O operations associated
+ * with the file to the operating system's synchronized
+ * I/O completion state.
+ * @param {number} fd
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function fdatasync(fd, callback) {
+ const req = new FSReqCallback();
+ req.oncomplete = makeCallback(callback);
+
+ if (permission.isEnabled()) {
+ callback(
+ new ERR_ACCESS_DENIED(
+ "fdatasync API is disabled when Permission Model is enabled."
+ )
+ );
+ return;
+ }
+ binding.fdatasync(fd, req);
+}
+
+/**
+ * Synchronously forces all currently queued I/O operations
+ * associated with the file to the operating
+ * system's synchronized I/O completion state.
+ * @param {number} fd
+ * @returns {void}
+ */
+function fdatasyncSync(fd) {
+  // Reject up front when the permission model is active; the binding is
+  // never reached in that case.
+  if (permission.isEnabled()) {
+    throw new ERR_ACCESS_DENIED(
+      "fdatasync API is disabled when Permission Model is enabled."
+    );
+  }
+  binding.fdatasync(fd);
+}
+
+/**
+ * Requests for all data for the open file descriptor
+ * to be flushed to the storage device.
+ * @param {number} fd
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function fsync(fd, callback) {
+ const req = new FSReqCallback();
+ req.oncomplete = makeCallback(callback);
+ if (permission.isEnabled()) {
+ callback(
+ new ERR_ACCESS_DENIED(
+ "fsync API is disabled when Permission Model is enabled."
+ )
+ );
+ return;
+ }
+ binding.fsync(fd, req);
+}
+
+/**
+ * Synchronously requests for all data for the open
+ * file descriptor to be flushed to the storage device.
+ * @param {number} fd
+ * @returns {void}
+ */
+function fsyncSync(fd) {
+  // Reject up front when the permission model is active; the binding is
+  // never reached in that case.
+  if (permission.isEnabled()) {
+    throw new ERR_ACCESS_DENIED(
+      "fsync API is disabled when Permission Model is enabled."
+    );
+  }
+  binding.fsync(fd);
+}
+
+/**
+ * Asynchronously creates a directory.
+ * @param {string | Buffer | URL} path
+ * @param {{
+ *   recursive?: boolean;
+ *   mode?: string | number;
+ *   } | number} [options]
+ * @param {(err?: Error) => any} callback
+ * @returns {void}
+ */
+function mkdir(path, options, callback) {
+  let mode = 0o777;
+  let recursive = false;
+  if (typeof options === "function") {
+    // mkdir(path, cb) form.
+    callback = options;
+  } else if (typeof options === "number" || typeof options === "string") {
+    // mkdir(path, mode, cb) form.
+    mode = parseFileMode(options, "mode");
+  } else if (options) {
+    if (options.recursive !== undefined) {
+      recursive = options.recursive;
+      validateBoolean(recursive, "options.recursive");
+    }
+    if (options.mode !== undefined) {
+      mode = parseFileMode(options.mode, "options.mode");
+    }
+  }
+  callback = makeCallback(callback);
+
+  const req = new FSReqCallback();
+  req.oncomplete = callback;
+  binding.mkdir(getValidatedPath(path), mode, recursive, req);
+}
+
/**
 * Synchronously creates a directory.
 * @param {string | Buffer | URL} path
 * @param {{
 *   recursive?: boolean;
 *   mode?: string | number;
 * } | number} [options]
 * @returns {string | void} First created directory (recursive mode only).
 */
function mkdirSync(path, options) {
  let recursive = false;
  let mode = 0o777;

  if (typeof options === "number" || typeof options === "string") {
    // mkdirSync(path, mode) form.
    mode = parseFileMode(options, "mode");
  } else if (options) {
    if (options.recursive !== undefined) {
      recursive = options.recursive;
      validateBoolean(recursive, "options.recursive");
    }
    if (options.mode !== undefined) {
      mode = parseFileMode(options.mode, "options.mode");
    }
  }

  const ret = binding.mkdir(getValidatedPath(path), mode, recursive);

  // Only the recursive form reports a result (the first directory created).
  return recursive ? ret : undefined;
}
+
/**
 * A recursive algorithm for reading the entire contents of the `basePath` directory.
 * This function does not validate `basePath` as a directory. It is passed directly to
 * `binding.readdir`.
 * @param {string} basePath
 * @param {{ encoding: string, withFileTypes: boolean }} options
 * @param {(
 *   err?: Error,
 *   files?: string[] | Buffer[] | Dirent[]
 * ) => any} callback
 * @returns {void}
 */
function readdirRecursive(basePath, options, callback) {
  // Shared state for the asynchronous walk: discovered entries accumulate in
  // `readdirResults`, and subdirectories still to be visited are appended to
  // `pathsQueue` by processReaddirResult().
  const context = {
    withFileTypes: Boolean(options.withFileTypes),
    encoding: options.encoding,
    basePath,
    readdirResults: [],
    pathsQueue: [basePath],
  };

  // Index of the next queued path to read; only one read is in flight at a time.
  let i = 0;

  function read(path) {
    const req = new FSReqCallback();
    req.oncomplete = (err, result) => {
      if (err) {
        callback(err);
        return;
      }

      // NOTE(review): an `undefined` result from binding.readdir is treated
      // as "nothing more to report" and ends the walk — confirm against the
      // binding's contract.
      if (result === undefined) {
        callback(null, context.readdirResults);
        return;
      }

      // May push newly discovered subdirectories onto `context.pathsQueue`.
      processReaddirResult({
        result,
        currentPath: path,
        context,
      });

      if (i < context.pathsQueue.length) {
        read(context.pathsQueue[i++]);
      } else {
        // Queue exhausted: every reachable directory has been visited.
        callback(null, context.readdirResults);
      }
    };

    binding.readdir(path, context.encoding, context.withFileTypes, req);
  }

  read(context.pathsQueue[i++]);
}
+
// With `withFileTypes=true`, `binding.readdir` yields a pair of parallel
// arrays (entry names and entry types) of identical length; otherwise it
// yields plain path strings. Dispatch to the matching handler.
const processReaddirResult = (args) => {
  if (args.context.withFileTypes) {
    return handleDirents(args);
  }
  return handleFilePaths(args);
};
+
// Converts a (names, types) result pair into Dirent objects, appending them
// to the accumulated results and queueing directories for further traversal.
function handleDirents({ result, currentPath, context }) {
  const names = result[0];
  const types = result[1];

  for (let idx = 0; idx < names.length; idx++) {
    const name = names[idx];
    const fullPath = pathModule.join(currentPath, name);
    const dirent = getDirent(currentPath, name, types[idx]);
    ArrayPrototypePush(context.readdirResults, dirent);

    // A symlink dirent is not itself a directory, but may resolve to one;
    // the internalModuleStat fallback catches that case.
    // Refs: https://github.com/nodejs/node/issues/52663
    if (dirent.isDirectory() || binding.internalModuleStat(fullPath) === 1) {
      ArrayPrototypePush(context.pathsQueue, fullPath);
    }
  }
}
+
// Records each entry as a path relative to the walk's base directory and
// queues directories (internalModuleStat === 1, matching handleDirents) for
// further traversal.
function handleFilePaths({ result, currentPath, context }) {
  for (let idx = 0; idx < result.length; idx++) {
    const absolute = pathModule.join(currentPath, result[idx]);
    const relative = pathModule.relative(context.basePath, absolute);
    const entryStat = binding.internalModuleStat(absolute);
    ArrayPrototypePush(context.readdirResults, relative);

    if (entryStat === 1) {
      ArrayPrototypePush(context.pathsQueue, absolute);
    }
  }
}
+
/**
 * An iterative algorithm for reading the entire contents of the `basePath` directory.
 * This function does not validate `basePath` as a directory. It is passed directly to
 * `binding.readdir`.
 * @param {string} basePath
 * @param {{ encoding: string, withFileTypes: boolean }} options
 * @returns {string[] | Dirent[]}
 */
function readdirSyncRecursive(basePath, options) {
  const context = {
    withFileTypes: Boolean(options.withFileTypes),
    encoding: options.encoding,
    basePath,
    readdirResults: [],
    pathsQueue: [basePath],
  };

  // Breadth-first walk: processReaddirResult() appends newly discovered
  // directories to the queue while we advance the cursor through it.
  let cursor = 0;
  while (cursor < context.pathsQueue.length) {
    const current = context.pathsQueue[cursor++];
    const entries = binding.readdir(
      current,
      context.encoding,
      context.withFileTypes
    );

    if (entries !== undefined) {
      processReaddirResult({
        result: entries,
        currentPath: current,
        context,
      });
    }
  }

  return context.readdirResults;
}
+
/**
 * Reads the contents of a directory.
 * @param {string | Buffer | URL} path
 * @param {string | {
 *   encoding?: string;
 *   withFileTypes?: boolean;
 *   recursive?: boolean;
 * }} [options]
 * @param {(
 *   err?: Error,
 *   files?: string[] | Buffer[] | Dirent[]
 * ) => any} callback
 * @returns {void}
 */
function readdir(path, options, callback) {
  callback = makeCallback(typeof options === "function" ? options : callback);
  options = getOptions(options);
  path = getValidatedPath(path);
  if (options.recursive != null) {
    validateBoolean(options.recursive, "options.recursive");
  }

  if (options.recursive) {
    // Hand the recursive walker a shallow copy so later caller mutations of
    // `options` cannot affect the in-flight traversal.
    readdirRecursive(path, copyObject(options), callback);
    return;
  }

  const withFileTypes = !!options.withFileTypes;
  const req = new FSReqCallback();
  req.oncomplete = withFileTypes
    ? (err, result) => {
        if (err) {
          callback(err);
          return;
        }
        // Wrap the raw (names, types) result into Dirent objects.
        getDirents(path, result, callback);
      }
    : callback;
  binding.readdir(path, options.encoding, withFileTypes, req);
}
+
/**
 * Synchronously reads the contents of a directory.
 * @param {string | Buffer | URL} path
 * @param {string | {
 *   encoding?: string;
 *   withFileTypes?: boolean;
 *   recursive?: boolean;
 * }} [options]
 * @returns {string | Buffer[] | Dirent[]}
 */
function readdirSync(path, options) {
  options = getOptions(options);
  path = getValidatedPath(path);
  if (options.recursive != null) {
    validateBoolean(options.recursive, "options.recursive");
  }
  if (options.recursive) {
    return readdirSyncRecursive(path, options);
  }

  const entries = binding.readdir(
    path,
    options.encoding,
    !!options.withFileTypes
  );

  // An undefined result is passed through untouched; otherwise wrap into
  // Dirent objects when the caller asked for file types.
  if (entries === undefined || !options.withFileTypes) {
    return entries;
  }
  return getDirents(path, entries);
}
+
/**
 * Invokes the callback with the `fs.Stats`
 * for the file descriptor.
 * @param {number} fd
 * @param {{ bigint?: boolean; }} [options]
 * @param {(
 *   err?: Error,
 *   stats?: Stats
 * ) => any} [callback]
 * @returns {void}
 */
function fstat(fd, options = { bigint: false }, callback) {
  if (typeof options === "function") {
    // fstat(fd, callback) form.
    callback = options;
    options = kEmptyObject;
  }
  const cb = makeStatsCallback(callback);
  const { bigint } = options;

  const req = new FSReqCallback(bigint);
  req.oncomplete = cb;
  binding.fstat(fd, bigint, req);
}
+
/**
 * Retrieves the `fs.Stats` for the symbolic link
 * referred to by the `path` (the link itself is stat'ed, not its target).
 * @param {string | Buffer | URL} path
 * @param {{ bigint?: boolean; }} [options]
 * @param {(
 *   err?: Error,
 *   stats?: Stats
 * ) => any} callback
 * @returns {void}
 */
function lstat(path, options = { bigint: false }, callback) {
  if (typeof options === "function") {
    // lstat(path, callback) form.
    callback = options;
    options = kEmptyObject;
  }
  const cb = makeStatsCallback(callback);
  const validated = getValidatedPath(path);

  // Deny asynchronously via the callback when the permission model blocks
  // read access to this path.
  if (permission.isEnabled() && !permission.has("fs.read", validated)) {
    const resource = BufferIsBuffer(validated)
      ? BufferToString(validated)
      : validated;
    cb(
      new ERR_ACCESS_DENIED(
        "Access to this API has been restricted",
        "FileSystemRead",
        resource
      )
    );
    return;
  }

  const req = new FSReqCallback(options.bigint);
  req.oncomplete = cb;
  binding.lstat(validated, options.bigint, req);
}
+
/**
 * Asynchronously gets the stats of a file.
 * @param {string | Buffer | URL} path
 * @param {{ bigint?: boolean; }} [options]
 * @param {(
 *   err?: Error,
 *   stats?: Stats
 * ) => any} callback
 * @returns {void}
 */
function stat(path, options = { bigint: false }, callback) {
  if (typeof options === "function") {
    // stat(path, callback) form.
    callback = options;
    options = kEmptyObject;
  }
  const cb = makeStatsCallback(callback);
  const { bigint } = options;

  const req = new FSReqCallback(bigint);
  req.oncomplete = cb;
  binding.stat(getValidatedPath(path), bigint, req);
}
+
/**
 * Asynchronously retrieves file-system statistics (`fs.StatFs`)
 * for the file system containing `path`.
 * @param {string | Buffer | URL} path
 * @param {{ bigint?: boolean; }} [options]
 * @param {(err?: Error, stats?: StatFs) => any} callback
 * @returns {void}
 */
function statfs(path, options = { bigint: false }, callback) {
  if (typeof options === "function") {
    // statfs(path, callback) form.
    callback = options;
    options = kEmptyObject;
  }
  validateFunction(callback, "cb");
  path = getValidatedPath(path);
  const req = new FSReqCallback(options.bigint);
  req.oncomplete = (err, stats) => {
    if (err) {
      return callback(err);
    }
    // Convert the raw binding result into a StatFs object.
    callback(err, getStatFsFromBinding(stats));
  };
  // `path` was already validated above; the previous version redundantly
  // re-ran getValidatedPath() on the validated result here.
  binding.statfs(path, options.bigint, req);
}
+
/**
 * Synchronously retrieves the `fs.Stats` for
 * the file descriptor.
 * @param {number} fd
 * @param {{ bigint?: boolean; }} [options]
 * @returns {Stats | undefined}
 */
function fstatSync(fd, options = { bigint: false }) {
  // Final `false` disables throw-on-missing in the binding.
  const raw = binding.fstat(fd, options.bigint, undefined, false);
  return raw === undefined ? undefined : getStatsFromBinding(raw);
}
+
/**
 * Synchronously retrieves the `fs.Stats` for
 * the symbolic link referred to by the `path`.
 * @param {string | Buffer | URL} path
 * @param {{
 *   bigint?: boolean;
 *   throwIfNoEntry?: boolean;
 * }} [options]
 * @returns {Stats | undefined}
 */
function lstatSync(path, options = { bigint: false, throwIfNoEntry: true }) {
  path = getValidatedPath(path);
  if (permission.isEnabled() && !permission.has("fs.read", path)) {
    const resource = BufferIsBuffer(path) ? BufferToString(path) : path;
    throw new ERR_ACCESS_DENIED(
      "Access to this API has been restricted",
      "FileSystemRead",
      resource
    );
  }
  // `path` was validated above; the previous version redundantly re-ran
  // getValidatedPath() on the already-validated value here.
  const stats = binding.lstat(
    path,
    options.bigint,
    undefined,
    options.throwIfNoEntry
  );

  if (stats === undefined) {
    return;
  }
  return getStatsFromBinding(stats);
}
+
/**
 * Synchronously retrieves the `fs.Stats`
 * for the `path`.
 * @param {string | Buffer | URL} path
 * @param {{
 *   bigint?: boolean;
 *   throwIfNoEntry?: boolean;
 * }} [options]
 * @returns {Stats}
 */
function statSync(path, options = { bigint: false, throwIfNoEntry: true }) {
  const raw = binding.stat(
    getValidatedPath(path),
    options.bigint,
    undefined,
    options.throwIfNoEntry
  );
  // `undefined` means "no entry" when throwIfNoEntry is false.
  return raw === undefined ? undefined : getStatsFromBinding(raw);
}
+
/**
 * Synchronously retrieves file-system statistics (`fs.StatFs`)
 * for the file system containing `path`.
 * @param {string | Buffer | URL} path
 * @param {{ bigint?: boolean; }} [options]
 * @returns {StatFs}
 */
function statfsSync(path, options = { bigint: false }) {
  return getStatFsFromBinding(
    binding.statfs(getValidatedPath(path), options.bigint)
  );
}
+
/**
 * Reads the contents of a symbolic link
 * referred to by `path`.
 * @param {string | Buffer | URL} path
 * @param {{ encoding?: string; } | string} [options]
 * @param {(
 *   err?: Error,
 *   linkString?: string | Buffer
 * ) => any} callback
 * @returns {void}
 */
function readlink(path, options, callback) {
  // readlink(path, callback) form: `options` holds the callback.
  const cb = makeCallback(typeof options === "function" ? options : callback);
  options = getOptions(options);

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.readlink(getValidatedPath(path), options.encoding, req);
}
+
/**
 * Synchronously reads the contents of a symbolic link
 * referred to by `path`.
 * @param {string | Buffer | URL} path
 * @param {{ encoding?: string; } | string} [options]
 * @returns {string | Buffer}
 */
function readlinkSync(path, options) {
  const { encoding } = getOptions(options);
  return binding.readlink(getValidatedPath(path), encoding);
}
+
/**
 * Creates the link called `path` pointing to `target`.
 * @param {string | Buffer | URL} target
 * @param {string | Buffer | URL} path
 * @param {string | null} [type] - 'dir' | 'file' | 'junction'; inferred on
 *   Windows when omitted (see below).
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function symlink(target, path, type, callback) {
  if (callback === undefined) {
    // symlink(target, path, callback) form: `type` holds the callback.
    callback = makeCallback(type);
    type = undefined;
  } else {
    validateOneOf(type, "type", ["dir", "file", "junction", null, undefined]);
  }

  // Due to the nature of the Node.js runtime, symlinks have different edge cases that can bypass
  // the permission model security guarantees. Thus, this API is disabled unless fs.read
  // and fs.write permission has been given.
  if (permission.isEnabled() && !permission.has("fs")) {
    callback(
      new ERR_ACCESS_DENIED(
        "fs.symlink API requires full fs.read and fs.write permissions."
      )
    );
    return;
  }

  target = getValidatedPath(target, "target");
  path = getValidatedPath(path);

  // On Windows the symlink flavor matters, so when the caller did not
  // specify one, asynchronously stat the would-be target to pick it.
  if (isWindows && type == null) {
    let absoluteTarget;
    try {
      // Symlinks targets can be relative to the newly created path.
      // Calculate absolute file name of the symlink target, and check
      // if it is a directory. Ignore resolve error to keep symlink
      // errors consistent between platforms if invalid path is
      // provided.
      absoluteTarget = pathModule.resolve(path, "..", target);
    } catch {
      // Continue regardless of error.
    }
    if (absoluteTarget !== undefined) {
      stat(absoluteTarget, (err, stat) => {
        // A missing or non-directory target yields a file symlink.
        const resolvedType = !err && stat.isDirectory() ? "dir" : "file";
        const resolvedFlags = stringToSymlinkType(resolvedType);
        const destination = preprocessSymlinkDestination(
          target,
          resolvedType,
          path
        );

        const req = new FSReqCallback();
        req.oncomplete = callback;
        binding.symlink(destination, path, resolvedFlags, req);
      });
      return;
    }
  }

  const destination = preprocessSymlinkDestination(target, type, path);

  const flags = stringToSymlinkType(type);
  const req = new FSReqCallback();
  req.oncomplete = callback;
  binding.symlink(destination, path, flags, req);
}
+
/**
 * Synchronously creates the link called `path`
 * pointing to `target`.
 * @param {string | Buffer | URL} target
 * @param {string | Buffer | URL} path
 * @param {string | null} [type] - 'dir' | 'file' | 'junction'; inferred on
 *   Windows when omitted.
 * @returns {void}
 */
function symlinkSync(target, path, type) {
  validateOneOf(type, "type", ["dir", "file", "junction", null, undefined]);

  // On Windows, when no type is given, infer "dir" by resolving the target
  // (which may be relative to the new link) and checking if it exists as a
  // directory.
  if (isWindows && type == null) {
    const resolved = pathModule.resolve(`${path}`, "..", `${target}`);
    const targetStat = statSync(resolved, { throwIfNoEntry: false });
    if (targetStat?.isDirectory()) {
      type = "dir";
    }
  }

  // Symlinks can bypass the permission model's guarantees, so the API
  // requires full fs.read and fs.write permission.
  if (permission.isEnabled() && !permission.has("fs")) {
    throw new ERR_ACCESS_DENIED(
      "fs.symlink API requires full fs.read and fs.write permissions."
    );
  }

  target = getValidatedPath(target, "target");
  path = getValidatedPath(path);

  binding.symlink(
    preprocessSymlinkDestination(target, type, path),
    path,
    stringToSymlinkType(type)
  );
}
+
/**
 * Creates a new hard link from the `existingPath`
 * to the `newPath`.
 * @param {string | Buffer | URL} existingPath
 * @param {string | Buffer | URL} newPath
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function link(existingPath, newPath, callback) {
  const cb = makeCallback(callback);

  const src = getValidatedPath(existingPath, "existingPath");
  const dest = getValidatedPath(newPath, "newPath");

  const req = new FSReqCallback();
  req.oncomplete = cb;

  binding.link(src, dest, req);
}
+
/**
 * Synchronously creates a new hard link from the `existingPath`
 * to the `newPath`.
 * @param {string | Buffer | URL} existingPath
 * @param {string | Buffer | URL} newPath
 * @returns {void}
 */
function linkSync(existingPath, newPath) {
  // Validation order matters for error reporting: existingPath first.
  const src = getValidatedPath(existingPath, "existingPath");
  const dest = getValidatedPath(newPath, "newPath");
  binding.link(src, dest);
}
+
/**
 * Asynchronously removes a file or symbolic link.
 * @param {string | Buffer | URL} path
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function unlink(path, callback) {
  const cb = makeCallback(callback);
  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.unlink(getValidatedPath(path), req);
}
+
/**
 * Synchronously removes a file or symbolic link.
 * @param {string | Buffer | URL} path
 * @returns {void}
 */
function unlinkSync(path) {
  const validated = getValidatedPath(path);
  binding.unlink(validated);
}
+
/**
 * Sets the permissions on the file referred to by `fd`.
 * @param {number} fd
 * @param {string | number} mode
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function fchmod(fd, mode, callback) {
  const parsedMode = parseFileMode(mode, "mode");
  const cb = makeCallback(callback);

  // Raw-fd chmod is unavailable under the permission model; report
  // asynchronously through the callback.
  if (permission.isEnabled()) {
    cb(
      new ERR_ACCESS_DENIED(
        "fchmod API is disabled when Permission Model is enabled."
      )
    );
    return;
  }

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.fchmod(fd, parsedMode, req);
}
+
/**
 * Synchronously sets the permissions on the file referred to by `fd`.
 * @param {number} fd
 * @param {string | number} mode
 * @returns {void}
 */
function fchmodSync(fd, mode) {
  if (permission.isEnabled()) {
    throw new ERR_ACCESS_DENIED(
      "fchmod API is disabled when Permission Model is enabled."
    );
  }
  const parsedMode = parseFileMode(mode, "mode");
  binding.fchmod(fd, parsedMode);
}
+
/**
 * Changes the permissions on a symbolic link itself, by opening the link
 * with O_WRONLY | O_SYMLINK and fchmod-ing the resulting descriptor.
 * @param {string | Buffer | URL} path
 * @param {number} mode
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function lchmod(path, mode, callback) {
  validateFunction(callback, "cb");
  mode = parseFileMode(mode, "mode");
  fs.open(path, O_WRONLY | O_SYMLINK, (err, fd) => {
    if (err) {
      callback(err);
      return;
    }
    // Prefer to return the chmod error, if one occurs,
    // but still try to close, and report closing errors if they occur.
    fs.fchmod(fd, mode, (err) => {
      fs.close(fd, (err2) => {
        // aggregateTwoErrors favors the second (chmod) error over the
        // close error when both are present.
        callback(aggregateTwoErrors(err2, err));
      });
    });
  });
}
+
/**
 * Synchronously changes the permissions on a symbolic link itself, by
 * opening the link with O_WRONLY | O_SYMLINK and fchmod-ing the descriptor.
 * @param {string | Buffer | URL} path
 * @param {number} mode
 * @returns {void}
 */
function lchmodSync(path, mode) {
  const fd = fs.openSync(path, O_WRONLY | O_SYMLINK);
  try {
    fs.fchmodSync(fd, mode);
  } finally {
    // Close unconditionally; a chmod error still propagates, but the
    // descriptor is never leaked.
    fs.closeSync(fd);
  }
}
+
/**
 * Asynchronously changes the permissions of a file.
 * @param {string | Buffer | URL} path
 * @param {string | number} mode
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function chmod(path, mode, callback) {
  const validated = getValidatedPath(path);
  const parsedMode = parseFileMode(mode, "mode");
  const cb = makeCallback(callback);

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.chmod(validated, parsedMode, req);
}
+
/**
 * Synchronously changes the permissions of a file.
 * @param {string | Buffer | URL} path
 * @param {string | number} mode
 * @returns {void}
 */
function chmodSync(path, mode) {
  // Arguments are evaluated left-to-right, preserving the original
  // path-then-mode validation order.
  binding.chmod(getValidatedPath(path), parseFileMode(mode, "mode"));
}
+
/**
 * Sets the owner of the symbolic link itself (not its target).
 * @param {string | Buffer | URL} path
 * @param {number} uid
 * @param {number} gid
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function lchown(path, uid, gid, callback) {
  const cb = makeCallback(callback);
  const validated = getValidatedPath(path);
  validateInteger(uid, "uid", -1, kMaxUserId);
  validateInteger(gid, "gid", -1, kMaxUserId);

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.lchown(validated, uid, gid, req);
}
+
/**
 * Synchronously sets the owner of the symbolic link itself (not its target).
 * @param {string | Buffer | URL} path
 * @param {number} uid
 * @param {number} gid
 * @returns {void}
 */
function lchownSync(path, uid, gid) {
  const validated = getValidatedPath(path);
  validateInteger(uid, "uid", -1, kMaxUserId);
  validateInteger(gid, "gid", -1, kMaxUserId);
  binding.lchown(validated, uid, gid);
}
+
/**
 * Sets the owner of the file referred to by `fd`.
 * @param {number} fd
 * @param {number} uid
 * @param {number} gid
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function fchown(fd, uid, gid, callback) {
  validateInteger(uid, "uid", -1, kMaxUserId);
  validateInteger(gid, "gid", -1, kMaxUserId);
  const cb = makeCallback(callback);

  // Raw-fd chown is unavailable under the permission model; report
  // asynchronously through the callback.
  if (permission.isEnabled()) {
    cb(
      new ERR_ACCESS_DENIED(
        "fchown API is disabled when Permission Model is enabled."
      )
    );
    return;
  }

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.fchown(fd, uid, gid, req);
}
+
/**
 * Synchronously sets the owner of the file referred to by `fd`.
 * @param {number} fd
 * @param {number} uid
 * @param {number} gid
 * @returns {void}
 */
function fchownSync(fd, uid, gid) {
  validateInteger(uid, "uid", -1, kMaxUserId);
  validateInteger(gid, "gid", -1, kMaxUserId);
  if (permission.isEnabled()) {
    throw new ERR_ACCESS_DENIED(
      "fchown API is disabled when Permission Model is enabled."
    );
  }

  binding.fchown(fd, uid, gid);
}
+
/**
 * Asynchronously changes the owner and group
 * of a file.
 * @param {string | Buffer | URL} path
 * @param {number} uid
 * @param {number} gid
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function chown(path, uid, gid, callback) {
  const cb = makeCallback(callback);
  const validated = getValidatedPath(path);
  validateInteger(uid, "uid", -1, kMaxUserId);
  validateInteger(gid, "gid", -1, kMaxUserId);

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.chown(validated, uid, gid, req);
}
+
/**
 * Synchronously changes the owner and group
 * of a file.
 * @param {string | Buffer | URL} path
 * @param {number} uid
 * @param {number} gid
 * @returns {void}
 */
function chownSync(path, uid, gid) {
  const validated = getValidatedPath(path);
  validateInteger(uid, "uid", -1, kMaxUserId);
  validateInteger(gid, "gid", -1, kMaxUserId);
  binding.chown(validated, uid, gid);
}
+
/**
 * Changes the file system timestamps of the object
 * referenced by `path`.
 * @param {string | Buffer | URL} path
 * @param {number | string | Date} atime
 * @param {number | string | Date} mtime
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function utimes(path, atime, mtime, callback) {
  const cb = makeCallback(callback);
  const validated = getValidatedPath(path);

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.utimes(
    validated,
    toUnixTimestamp(atime),
    toUnixTimestamp(mtime),
    req
  );
}
+
/**
 * Synchronously changes the file system timestamps
 * of the object referenced by `path`.
 * @param {string | Buffer | URL} path
 * @param {number | string | Date} atime
 * @param {number | string | Date} mtime
 * @returns {void}
 */
function utimesSync(path, atime, mtime) {
  const validated = getValidatedPath(path);
  binding.utimes(validated, toUnixTimestamp(atime), toUnixTimestamp(mtime));
}
+
/**
 * Changes the file system timestamps of the object
 * referenced by the supplied `fd` (file descriptor).
 * @param {number} fd
 * @param {number | string | Date} atime
 * @param {number | string | Date} mtime
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function futimes(fd, atime, mtime, callback) {
  const at = toUnixTimestamp(atime, "atime");
  const mt = toUnixTimestamp(mtime, "mtime");
  const cb = makeCallback(callback);

  // Raw-fd timestamp changes are unavailable under the permission model;
  // report asynchronously through the callback.
  if (permission.isEnabled()) {
    cb(
      new ERR_ACCESS_DENIED(
        "futimes API is disabled when Permission Model is enabled."
      )
    );
    return;
  }

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.futimes(fd, at, mt, req);
}
+
/**
 * Synchronously changes the file system timestamps
 * of the object referenced by the
 * supplied `fd` (file descriptor).
 * @param {number} fd
 * @param {number | string | Date} atime
 * @param {number | string | Date} mtime
 * @returns {void}
 */
function futimesSync(fd, atime, mtime) {
  if (permission.isEnabled()) {
    throw new ERR_ACCESS_DENIED(
      "futimes API is disabled when Permission Model is enabled."
    );
  }

  const at = toUnixTimestamp(atime, "atime");
  const mt = toUnixTimestamp(mtime, "mtime");
  binding.futimes(fd, at, mt);
}
+
/**
 * Changes the access and modification times of
 * a file in the same way as `fs.utimes()`, but without
 * dereferencing a symbolic link at `path`.
 * @param {string | Buffer | URL} path
 * @param {number | string | Date} atime
 * @param {number | string | Date} mtime
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function lutimes(path, atime, mtime, callback) {
  const cb = makeCallback(callback);
  const validated = getValidatedPath(path);

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.lutimes(
    validated,
    toUnixTimestamp(atime),
    toUnixTimestamp(mtime),
    req
  );
}
+
/**
 * Synchronously changes the access and modification
 * times of a file in the same way as `fs.utimesSync()`, but without
 * dereferencing a symbolic link at `path`.
 * @param {string | Buffer | URL} path
 * @param {number | string | Date} atime
 * @param {number | string | Date} mtime
 * @returns {void}
 */
function lutimesSync(path, atime, mtime) {
  const validated = getValidatedPath(path);
  binding.lutimes(validated, toUnixTimestamp(atime), toUnixTimestamp(mtime));
}
+
/**
 * Writes `length` bytes of `buffer` starting at `offset` to `fd`, retrying
 * on short writes until everything is written; optionally fsyncs afterwards,
 * and closes the descriptor unless the caller owns it (`isUserFd`). Close
 * errors are combined with the primary error via `aggregateTwoErrors`.
 * @param {number} fd
 * @param {boolean} isUserFd - the caller supplied `fd`; never close it here.
 * @param {Buffer | TypedArray | DataView} buffer
 * @param {number} offset
 * @param {number} length
 * @param {AbortSignal} [signal]
 * @param {boolean} flush - fsync after the final write when true.
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function writeAll(
  fd,
  isUserFd,
  buffer,
  offset,
  length,
  signal,
  flush,
  callback
) {
  // Re-checked on every recursion so an abort interrupts a long multi-chunk
  // write sequence.
  if (signal?.aborted) {
    const abortError = new AbortError(undefined, { cause: signal.reason });
    if (isUserFd) {
      callback(abortError);
    } else {
      fs.close(fd, (err) => {
        callback(aggregateTwoErrors(err, abortError));
      });
    }
    return;
  }
  // write(fd, buffer, offset, length, position, callback)
  fs.write(fd, buffer, offset, length, null, (writeErr, written) => {
    if (writeErr) {
      if (isUserFd) {
        callback(writeErr);
      } else {
        fs.close(fd, (err) => {
          callback(aggregateTwoErrors(err, writeErr));
        });
      }
    } else if (written === length) {
      // Everything written; optionally fsync, then close (unless user-owned).
      if (!flush) {
        if (isUserFd) {
          callback(null);
        } else {
          fs.close(fd, callback);
        }
      } else {
        fs.fsync(fd, (syncErr) => {
          if (syncErr) {
            if (isUserFd) {
              callback(syncErr);
            } else {
              fs.close(fd, (err) => {
                callback(aggregateTwoErrors(err, syncErr));
              });
            }
          } else if (isUserFd) {
            callback(null);
          } else {
            fs.close(fd, callback);
          }
        });
      }
    } else {
      // Short write: advance past the bytes that made it and recurse.
      offset += written;
      length -= written;
      writeAll(fd, isUserFd, buffer, offset, length, signal, flush, callback);
    }
  });
}
+
/**
 * Asynchronously writes data to the file.
 * @param {string | Buffer | URL | number} path
 * @param {string | Buffer | TypedArray | DataView} data
 * @param {{
 *   encoding?: string | null;
 *   mode?: number;
 *   flag?: string;
 *   signal?: AbortSignal;
 *   flush?: boolean;
 * } | string} [options]
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function writeFile(path, data, options, callback) {
  // writeFile(path, data, callback) form.
  callback ||= options;
  validateFunction(callback, "cb");
  options = getOptions(options, {
    encoding: "utf8",
    mode: 0o666,
    flag: "w",
    flush: false,
  });
  const flag = options.flag || "w";
  const flush = options.flush ?? false;

  validateBoolean(flush, "options.flush");

  let buffer = data;
  if (!isArrayBufferView(buffer)) {
    validateStringAfterArrayBufferView(buffer, "data");
    buffer = Buffer.from(buffer, options.encoding || "utf8");
  }

  // A numeric path is an already-open descriptor owned by the caller.
  if (isFd(path)) {
    writeAll(
      path,
      true,
      buffer,
      0,
      buffer.byteLength,
      options.signal,
      flush,
      callback
    );
    return;
  }

  if (checkAborted(options.signal, callback)) return;

  fs.open(path, flag, options.mode, (openErr, fd) => {
    if (openErr) {
      callback(openErr);
      return;
    }
    // writeAll() owns and will close this descriptor.
    writeAll(
      fd,
      false,
      buffer,
      0,
      buffer.byteLength,
      options.signal,
      flush,
      callback
    );
  });
}
+
/**
 * Synchronously writes data to the file.
 * @param {string | Buffer | URL | number} path
 * @param {string | Buffer | TypedArray | DataView} data
 * @param {{
 *   encoding?: string | null;
 *   mode?: number;
 *   flag?: string;
 *   flush?: boolean;
 * } | string} [options]
 * @returns {void}
 */
function writeFileSync(path, data, options) {
  options = getOptions(options, {
    encoding: "utf8",
    mode: 0o666,
    flag: "w",
    flush: false,
  });

  const flush = options.flush ?? false;
  validateBoolean(flush, "options.flush");

  const flag = options.flag || "w";

  // Fast path: UTF-8 string data is written entirely by the C++ layer.
  if (
    typeof data === "string" &&
    (options.encoding === "utf8" || options.encoding === "utf-8")
  ) {
    // An int32 path is treated as an already-open file descriptor.
    const target = isInt32(path) ? path : getValidatedPath(path);

    return binding.writeFileUtf8(
      target,
      data,
      stringToFlags(flag),
      parseFileMode(options.mode, "mode", 0o666)
    );
  }

  let buffer = data;
  if (!isArrayBufferView(buffer)) {
    validateStringAfterArrayBufferView(buffer, "data");
    buffer = Buffer.from(buffer, options.encoding || "utf8");
  }

  // A numeric path is a caller-owned descriptor; otherwise open one here.
  const isUserFd = isFd(path);
  const fd = isUserFd ? path : fs.openSync(path, flag, options.mode);

  let offset = 0;
  let remaining = buffer.byteLength;
  try {
    // A single writeSync may write fewer bytes than requested; loop until
    // the whole buffer is flushed.
    while (remaining > 0) {
      const written = fs.writeSync(fd, buffer, offset, remaining);
      offset += written;
      remaining -= written;
    }

    if (flush) {
      fs.fsyncSync(fd);
    }
  } finally {
    // Only close descriptors we opened ourselves.
    if (!isUserFd) fs.closeSync(fd);
  }
}
+
/**
 * Asynchronously appends data to a file.
 * @param {string | Buffer | URL | number} path
 * @param {string | Buffer} data
 * @param {{
 *   encoding?: string | null;
 *   mode?: number;
 *   flag?: string;
 *   flush?: boolean;
 * } | string} [options]
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function appendFile(path, data, options, callback) {
  // appendFile(path, data, callback) form.
  callback ||= options;
  validateFunction(callback, "cb");
  // Copy so we never mutate the caller's options object.
  options = copyObject(
    getOptions(options, { encoding: "utf8", mode: 0o666, flag: "a" })
  );

  // A missing flag, or a caller-supplied file descriptor, always means
  // append mode.
  if (!options.flag || isFd(path)) options.flag = "a";

  fs.writeFile(path, data, options, callback);
}
+
/**
 * Synchronously appends data to a file.
 * @param {string | Buffer | URL | number} path
 * @param {string | Buffer} data
 * @param {{
 *   encoding?: string | null;
 *   mode?: number;
 *   flag?: string;
 * } | string} [options]
 * @returns {void}
 */
function appendFileSync(path, data, options) {
  // Copy so we never mutate the caller's options object.
  options = copyObject(
    getOptions(options, { encoding: "utf8", mode: 0o666, flag: "a" })
  );

  // A missing flag, or a caller-supplied file descriptor, always means
  // append mode.
  if (!options.flag || isFd(path)) options.flag = "a";

  fs.writeFileSync(path, data, options);
}
+
/**
 * Watches for changes on `filename`.
 * @param {string | Buffer | URL} filename
 * @param {string | {
 *   persistent?: boolean;
 *   recursive?: boolean;
 *   encoding?: string;
 *   signal?: AbortSignal;
 * }} [options]
 * @param {(
 *   eventType?: string,
 *   filename?: string | Buffer
 * ) => any} [listener]
 * @returns {watchers.FSWatcher}
 */
function watch(filename, options, listener) {
  if (typeof options === "function") {
    // watch(filename, listener) form.
    listener = options;
  }
  options = getOptions(options);

  // Don't make changes directly on options object
  options = copyObject(options);

  if (options.persistent === undefined) options.persistent = true;
  if (options.recursive === undefined) options.recursive = false;

  let watcher;
  const watchers = require("internal/fs/watchers");
  const path = getValidatedPath(filename);
  // TODO(anonrig): Remove non-native watcher when/if libuv supports recursive.
  // As of November 2022, libuv does not support recursive file watch on all platforms,
  // e.g. Linux due to the limitations of inotify.
  if (options.recursive && !isMacOS && !isWindows) {
    // Fall back to the JS-implemented recursive watcher on platforms where
    // libuv lacks native recursive support.
    const nonNativeWatcher = require("internal/fs/recursive_watch");
    watcher = new nonNativeWatcher.FSWatcher(options);
    watcher[watchers.kFSWatchStart](path);
  } else {
    watcher = new watchers.FSWatcher();
    watcher[watchers.kFSWatchStart](
      path,
      options.persistent,
      options.recursive,
      options.encoding
    );
  }

  if (listener) {
    watcher.addListener("change", listener);
  }
  if (options.signal) {
    if (options.signal.aborted) {
      // Already aborted: close on the next tick so the caller still
      // receives the watcher object first.
      process.nextTick(() => watcher.close());
    } else {
      const listener = () => watcher.close();
      // Lazily resolve the module-scoped kResistStopPropagation symbol.
      kResistStopPropagation ??=
        require("internal/event_target").kResistStopPropagation;
      options.signal.addEventListener("abort", listener, {
        __proto__: null,
        [kResistStopPropagation]: true,
      });
      watcher.once("close", () => {
        // Avoid leaking the abort listener once the watcher is gone.
        options.signal.removeEventListener("abort", listener);
      });
    }
  }

  return watcher;
}
+
// Registry of active StatWatcher instances keyed by resolved filename, so
// watchFile()/unwatchFile() share a single poller per file.
const statWatchers = new SafeMap();
+
/**
 * Watches for changes on `filename` by polling its stats.
 * Repeated calls for the same resolved path share one StatWatcher.
 * @param {string | Buffer | URL} filename
 * @param {{
 *   bigint?: boolean;
 *   persistent?: boolean;
 *   interval?: number;
 * }} [options]
 * @param {(
 *   current?: Stats,
 *   previous?: Stats
 * ) => any} listener
 * @returns {watchers.StatWatcher}
 */
function watchFile(filename, options, listener) {
  filename = pathModule.resolve(getValidatedPath(filename));

  // Support the (filename, listener) call shape.
  if (options === null || typeof options !== "object") {
    listener = options;
    options = null;
  }

  options = {
    // Poll interval in milliseconds. 5007 is what libev used to use. It's
    // a little on the slow side but let's stick with it for now to keep
    // behavioral changes to a minimum.
    interval: 5007,
    persistent: true,
    ...options,
  };

  validateFunction(listener, "listener");

  const watchers = require("internal/fs/watchers");
  let watcher = statWatchers.get(filename);
  if (watcher === undefined) {
    // First watcher for this path: start polling and register it.
    watcher = new watchers.StatWatcher(options.bigint);
    watcher[watchers.kFSStatWatcherStart](
      filename,
      options.persistent,
      options.interval
    );
    statWatchers.set(filename, watcher);
  } else {
    // Existing watcher: just take an additional reference.
    watcher[watchers.kFSStatWatcherAddOrCleanRef]("add");
  }

  watcher.addListener("change", listener);
  return watcher;
}
+
/**
 * Stops watching for changes on `filename`. With a listener, removes only
 * that listener; otherwise removes all. The shared StatWatcher is stopped
 * and deregistered once no "change" listeners remain.
 * @param {string | Buffer | URL} filename
 * @param {() => any} [listener]
 * @returns {void}
 */
function unwatchFile(filename, listener) {
  filename = pathModule.resolve(getValidatedPath(filename));
  const watcher = statWatchers.get(filename);
  if (watcher === undefined) return;

  const watchers = require("internal/fs/watchers");
  if (typeof listener === "function") {
    // Drop one reference only if the listener was actually attached.
    const before = watcher.listenerCount("change");
    watcher.removeListener("change", listener);
    if (watcher.listenerCount("change") < before) {
      watcher[watchers.kFSStatWatcherAddOrCleanRef]("clean");
    }
  } else {
    watcher.removeAllListeners("change");
    watcher[watchers.kFSStatWatcherAddOrCleanRef]("cleanAll");
  }

  if (watcher.listenerCount("change") === 0) {
    watcher.stop();
    statWatchers.delete(filename);
  }
}
+
// Splits the root portion off a path; platform-specific implementation.
let splitRoot;
if (isWindows) {
  // Regex to find the device root on Windows (e.g. 'c:\\'), including trailing
  // slash.
  const splitRootRe = /^(?:[a-zA-Z]:|[\\/]{2}[^\\/]+[\\/][^\\/]+)?[\\/]*/;
  splitRoot = function splitRoot(str) {
    // The regex always matches (possibly the empty string), so [0] is safe.
    return SideEffectFreeRegExpPrototypeExec(splitRootRe, str)[0];
  };
} else {
  splitRoot = function splitRoot(str) {
    // On POSIX the root is the run of leading '/' characters, if any.
    for (let i = 0; i < str.length; ++i) {
      if (StringPrototypeCharCodeAt(str, i) !== CHAR_FORWARD_SLASH)
        return StringPrototypeSlice(str, 0, i);
    }
    // The whole string consists of slashes.
    return str;
  };
}
+
/**
 * Re-encodes a resolved path according to `options.encoding`.
 * Returns the string untouched for the default utf8 encoding, a Buffer for
 * the special "buffer" encoding, and a re-encoded string otherwise.
 * @param {string} result
 * @param {{ encoding?: string | null } | undefined} options
 * @returns {string | Buffer}
 */
function encodeRealpathResult(result, options) {
  const encoding = options?.encoding;
  if (!encoding || encoding === "utf8") return result;
  const buf = Buffer.from(result);
  return encoding === "buffer" ? buf : buf.toString(encoding);
}
+
// Finds the next portion of a (partial) path, up to the next path delimiter
let nextPart;
if (isWindows) {
  nextPart = function nextPart(p, i) {
    // Windows treats both '\' and '/' as separators, so scan manually.
    for (; i < p.length; ++i) {
      const ch = StringPrototypeCharCodeAt(p, i);

      // Check for a separator character
      if (ch === CHAR_BACKWARD_SLASH || ch === CHAR_FORWARD_SLASH) return i;
    }
    // No further separator; mirrors indexOf's -1 convention below.
    return -1;
  };
} else {
  nextPart = function nextPart(p, i) {
    // POSIX: only '/' separates components.
    return StringPrototypeIndexOf(p, "/", i);
  };
}
+
/**
 * Returns the resolved pathname.
 * Walks the path component by component, replacing each symlink with its
 * target until no symlinks remain, while guarding against cycles.
 * @param {string | Buffer | URL} p
 * @param {string | { encoding?: string | null; }} [options]
 * @returns {string | Buffer}
 */
function realpathSync(p, options) {
  options = getOptions(options);
  p = toPathIfFileURL(p);
  if (typeof p !== "string") {
    p += "";
  }
  validatePath(p);
  p = pathModule.resolve(p);

  // Caller-supplied memo cache (carried on options under an internal key).
  const cache = options[realpathCacheKey];
  const maybeCachedResult = cache?.get(p);
  if (maybeCachedResult) {
    return maybeCachedResult;
  }

  // seenLinks: dev:ino -> link target, to avoid re-reading the same link.
  // knownHard: path prefixes already proven not to be symlinks.
  const seenLinks = new SafeMap();
  const knownHard = new SafeSet();
  const original = p;

  // Current character position in p
  let pos;
  // The partial path so far, including a trailing slash if any
  let current;
  // The partial path without a trailing slash (except when pointing at a root)
  let base;
  // The partial path scanned in the previous round, with slash
  let previous;

  // Skip over roots
  current = base = splitRoot(p);
  pos = current.length;

  // On windows, check that the root exists. On unix there is no need.
  if (isWindows) {
    const out = binding.lstat(
      base,
      false,
      undefined,
      true /* throwIfNoEntry */
    );
    // NOTE(review): with throwIfNoEntry=true a missing entry presumably
    // throws, which would make this undefined-check dead — confirm the
    // binding's error-return semantics.
    if (out === undefined) {
      return;
    }
    knownHard.add(base);
  }

  // Walk down the path, swapping out linked path parts for their real
  // values
  // NB: p.length changes.
  while (pos < p.length) {
    // find the next part
    const result = nextPart(p, pos);
    previous = current;
    if (result === -1) {
      // Final component: no trailing separator remains.
      const last = StringPrototypeSlice(p, pos);
      current += last;
      base = previous + last;
      pos = p.length;
    } else {
      current += StringPrototypeSlice(p, pos, result + 1);
      base = previous + StringPrototypeSlice(p, pos, result);
      pos = result + 1;
    }

    // Continue if not a symlink, break if a pipe/socket
    // NOTE(review): statValues holds the most recent stat result, which on
    // this fast path may not correspond to `base` — confirm intended.
    if (knownHard.has(base) || cache?.get(base) === base) {
      if (isFileType(statValues, S_IFIFO) || isFileType(statValues, S_IFSOCK)) {
        break;
      }
      continue;
    }

    let resolvedLink;
    const maybeCachedResolved = cache?.get(base);
    if (maybeCachedResolved) {
      resolvedLink = maybeCachedResolved;
    } else {
      // Use stats array directly to avoid creating an fs.Stats instance just
      // for our internal use.

      const stats = binding.lstat(
        base,
        true,
        undefined,
        true /* throwIfNoEntry */
      );
      if (stats === undefined) {
        return;
      }

      if (!isFileType(stats, S_IFLNK)) {
        // Plain file/directory: remember it is not a link and move on.
        knownHard.add(base);
        cache?.set(base, base);
        continue;
      }

      // Read the link if it wasn't read before
      // dev/ino always return 0 on windows, so skip the check.
      let linkTarget = null;
      let id;
      if (!isWindows) {
        const dev = BigIntPrototypeToString(stats[0], 32);
        const ino = BigIntPrototypeToString(stats[7], 32);
        id = `${dev}:${ino}`;
        if (seenLinks.has(id)) {
          linkTarget = seenLinks.get(id);
        }
      }
      if (linkTarget === null) {
        // stat() first so a dangling link raises the expected error before
        // readlink runs.
        binding.stat(base, false, undefined, true);
        linkTarget = binding.readlink(base, undefined);
      }
      resolvedLink = pathModule.resolve(previous, linkTarget);

      cache?.set(base, resolvedLink);
      if (!isWindows) seenLinks.set(id, linkTarget);
    }

    // Resolve the link, then start over
    p = pathModule.resolve(resolvedLink, StringPrototypeSlice(p, pos));

    // Skip over roots
    current = base = splitRoot(p);
    pos = current.length;

    // On windows, check that the root exists. On unix there is no need.
    if (isWindows && !knownHard.has(base)) {
      const out = binding.lstat(
        base,
        false,
        undefined,
        true /* throwIfNoEntry */
      );
      if (out === undefined) {
        return;
      }
      knownHard.add(base);
    }
  }

  cache?.set(original, p);
  return encodeRealpathResult(p, options);
}
+
/**
 * Returns the resolved pathname using the platform's native realpath.
 * @param {string | Buffer | URL} path
 * @param {string | { encoding?: string; }} [options]
 * @returns {string | Buffer}
 */
realpathSync.native = (path, options) => {
  const { encoding } = getOptions(options);
  return binding.realpath(getValidatedPath(path), encoding);
};
+
/**
 * Asynchronously computes the canonical pathname by
 * resolving `.`, `..` and symbolic links.
 * Implemented as a small continuation-passing state machine:
 * LOOP -> gotStat -> gotTarget -> gotResolvedLink -> LOOP.
 * @param {string | Buffer | URL} p
 * @param {string | { encoding?: string; }} [options]
 * @param {(
 *   err?: Error,
 *   resolvedPath?: string | Buffer
 * ) => any} callback
 * @returns {void}
 */
function realpath(p, options, callback) {
  if (typeof options === "function") {
    callback = options;
  } else {
    validateFunction(callback, "cb");
  }
  options = getOptions(options);
  p = toPathIfFileURL(p);

  if (typeof p !== "string") {
    p += "";
  }
  validatePath(p);
  p = pathModule.resolve(p);

  // seenLinks: dev:ino -> link target; knownHard: prefixes proven not links.
  const seenLinks = new SafeMap();
  const knownHard = new SafeSet();

  // Current character position in p
  let pos;
  // The partial path so far, including a trailing slash if any
  let current;
  // The partial path without a trailing slash (except when pointing at a root)
  let base;
  // The partial path scanned in the previous round, with slash
  let previous;

  current = base = splitRoot(p);
  pos = current.length;

  // On windows, check that the root exists. On unix there is no need.
  if (isWindows && !knownHard.has(base)) {
    fs.lstat(base, (err) => {
      if (err) return callback(err);
      knownHard.add(base);
      LOOP();
    });
  } else {
    // Defer so the callback is always invoked asynchronously.
    process.nextTick(LOOP);
  }

  // Walk down the path, swapping out linked path parts for their real
  // values
  function LOOP() {
    // Stop if scanned past end of path
    if (pos >= p.length) {
      return callback(null, encodeRealpathResult(p, options));
    }

    // find the next part
    const result = nextPart(p, pos);
    previous = current;
    if (result === -1) {
      // Final component: no trailing separator remains.
      const last = StringPrototypeSlice(p, pos);
      current += last;
      base = previous + last;
      pos = p.length;
    } else {
      current += StringPrototypeSlice(p, pos, result + 1);
      base = previous + StringPrototypeSlice(p, pos, result);
      pos = result + 1;
    }

    // Continue if not a symlink, break if a pipe/socket
    // NOTE(review): statValues holds the most recent stat result, which may
    // not correspond to `base` on this fast path — confirm intended.
    if (knownHard.has(base)) {
      if (isFileType(statValues, S_IFIFO) || isFileType(statValues, S_IFSOCK)) {
        return callback(null, encodeRealpathResult(p, options));
      }
      return process.nextTick(LOOP);
    }

    return fs.lstat(base, { bigint: true }, gotStat);
  }

  function gotStat(err, stats) {
    if (err) return callback(err);

    // If not a symlink, skip to the next path part
    if (!stats.isSymbolicLink()) {
      knownHard.add(base);
      return process.nextTick(LOOP);
    }

    // Stat & read the link if not read before.
    // Call `gotTarget()` as soon as the link target is known.
    // `dev`/`ino` always return 0 on windows, so skip the check.
    let id;
    if (!isWindows) {
      const dev = BigIntPrototypeToString(stats.dev, 32);
      const ino = BigIntPrototypeToString(stats.ino, 32);
      id = `${dev}:${ino}`;
      if (seenLinks.has(id)) {
        return gotTarget(null, seenLinks.get(id));
      }
    }
    // stat() first so a dangling link surfaces the expected error before
    // readlink runs.
    fs.stat(base, (err) => {
      if (err) return callback(err);

      fs.readlink(base, (err, target) => {
        if (!isWindows) seenLinks.set(id, target);
        gotTarget(err, target);
      });
    });
  }

  function gotTarget(err, target) {
    if (err) return callback(err);

    gotResolvedLink(pathModule.resolve(previous, target));
  }

  function gotResolvedLink(resolvedLink) {
    // Resolve the link, then start over
    p = pathModule.resolve(resolvedLink, StringPrototypeSlice(p, pos));
    current = base = splitRoot(p);
    pos = current.length;

    // On windows, check that the root exists. On unix there is no need.
    if (isWindows && !knownHard.has(base)) {
      fs.lstat(base, (err) => {
        if (err) return callback(err);
        knownHard.add(base);
        LOOP();
      });
    } else {
      process.nextTick(LOOP);
    }
  }
}
+
/**
 * Asynchronously computes the canonical pathname using the platform's
 * native realpath.
 * @param {string | Buffer | URL} path
 * @param {string | { encoding?: string; }} [options]
 * @param {(
 *   err?: Error,
 *   resolvedPath?: string | Buffer
 * ) => any} callback
 * @returns {void}
 */
realpath.native = (path, options, callback) => {
  // `options` may be omitted, in which case it holds the callback.
  callback = makeCallback(callback || options);
  options = getOptions(options);
  path = getValidatedPath(path);

  const req = new FSReqCallback();
  req.oncomplete = callback;
  binding.realpath(path, options.encoding, req);
};
+
/**
 * Creates a unique temporary directory from the given prefix template.
 * @param {string | Buffer | URL} prefix
 * @param {string | { encoding?: string; }} [options]
 * @param {(
 *   err?: Error,
 *   directory?: string
 * ) => any} callback
 * @returns {void}
 */
function mkdtemp(prefix, options, callback) {
  // Allow the options argument to be omitted.
  if (typeof options === "function") {
    callback = options;
    options = undefined;
  }
  callback = makeCallback(callback);
  options = getOptions(options);

  prefix = getValidatedPath(prefix, "prefix");
  warnOnNonPortableTemplate(prefix);

  const req = new FSReqCallback();
  req.oncomplete = callback;
  binding.mkdtemp(prefix, options.encoding, req);
}
+
/**
 * Synchronously creates a unique temporary directory from the given
 * prefix template and returns its path.
 * @param {string | Buffer | URL} prefix
 * @param {string | { encoding?: string; }} [options]
 * @returns {string}
 */
function mkdtempSync(prefix, options) {
  const { encoding } = getOptions(options);

  prefix = getValidatedPath(prefix, "prefix");
  warnOnNonPortableTemplate(prefix);
  return binding.mkdtemp(prefix, encoding);
}
+
/**
 * Synchronously creates a unique temporary directory.
 * The returned value is a disposable object whose `remove()` (and
 * Symbol.dispose) recursively deletes the directory and its contents.
 * @param {string | Buffer | URL} prefix
 * @param {string | { encoding?: string; }} [options]
 * @returns {object} A disposable object with a "path" property.
 */
function mkdtempDisposableSync(prefix, options) {
  options = getOptions(options);

  prefix = getValidatedPath(prefix, "prefix");
  warnOnNonPortableTemplate(prefix);

  const path = binding.mkdtemp(prefix, options.encoding);
  // Resolve against the current cwd immediately so a later process.chdir()
  // cannot break removal.
  const fullPath = pathModule.resolve(process.cwd(), path);

  const remove = () => {
    binding.rmSync(
      fullPath,
      0 /* maxRetries */,
      true /* recursive */,
      100 /* retryDelay */
    );
  };

  return {
    path,
    remove,
    [SymbolDispose]() {
      remove();
    },
  };
}
+
/**
 * Asynchronously copies `src` to `dest`; by default `dest` is overwritten
 * if it already exists.
 * @param {string | Buffer | URL} src
 * @param {string | Buffer | URL} dest
 * @param {number} [mode] copy-mode flags; defaults to 0 when omitted
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function copyFile(src, dest, mode, callback) {
  // Allow the mode argument to be omitted.
  if (typeof mode === "function") {
    callback = mode;
    mode = 0;
  }

  src = getValidatedPath(src, "src");
  dest = getValidatedPath(dest, "dest");

  const req = new FSReqCallback();
  req.oncomplete = makeCallback(callback);
  binding.copyFile(src, dest, mode, req);
}
+
/**
 * Synchronously copies `src` to `dest`; by default `dest` is overwritten
 * if it already exists.
 * @param {string | Buffer | URL} src
 * @param {string | Buffer | URL} dest
 * @param {number} [mode] copy-mode flags
 * @returns {void}
 */
function copyFileSync(src, dest, mode) {
  src = getValidatedPath(src, "src");
  dest = getValidatedPath(dest, "dest");
  binding.copyFile(src, dest, mode);
}
+
/**
 * Asynchronously copies `src` to `dest`. `src` can be a file, directory, or
 * symlink; directory contents are copied recursively.
 * @param {string | URL} src
 * @param {string | URL} dest
 * @param {object} [options]
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function cp(src, dest, options, callback) {
  // Allow the options argument to be omitted.
  if (typeof options === "function") {
    callback = options;
    options = undefined;
  }
  callback = makeCallback(callback);
  options = validateCpOptions(options);
  src = getValidatedPath(src, "src");
  dest = getValidatedPath(dest, "dest");

  // The copy implementation is loaded lazily on first use.
  lazyLoadCp();
  cpFn(src, dest, options, callback);
}
+
/**
 * Synchronously copies `src` to `dest`. `src` can be a file, directory, or
 * symlink; directory contents are copied recursively.
 * @param {string | URL} src
 * @param {string | URL} dest
 * @param {object} [options]
 * @returns {void}
 */
function cpSync(src, dest, options) {
  options = validateCpOptions(options);
  src = getValidatedPath(src, "src");
  dest = getValidatedPath(dest, "dest");

  // The copy implementation is loaded lazily on first use.
  lazyLoadCp();
  cpSyncFn(src, dest, options);
}
+
// Lazily requires the stream classes on first use and seeds the legacy
// FileReadStream/FileWriteStream aliases (kept separate because modules like
// graceful-fs monkey-patch them — see the exports object below).
function lazyLoadStreams() {
  if (!ReadStream) {
    ({ ReadStream, WriteStream } = require("internal/fs/streams"));
    FileReadStream = ReadStream;
    FileWriteStream = WriteStream;
  }
}
+
/**
 * Creates a readable stream for `path` with a default `highWaterMark`
 * of 64 KiB. Accepts either an encoding string or an options object
 * (flags, encoding, fd, mode, autoClose, emitClose, start, end,
 * highWaterMark, fs, signal).
 * @param {string | Buffer | URL} path
 * @param {string | object} [options]
 * @returns {ReadStream}
 */
function createReadStream(path, options) {
  // Ensure the stream classes are loaded before constructing one.
  lazyLoadStreams();
  const stream = new ReadStream(path, options);
  return stream;
}
+
/**
 * Creates a writable stream for `path`. Accepts either an encoding string
 * or an options object (flags, encoding, fd, mode, autoClose, emitClose,
 * start, fs, signal, highWaterMark, flush).
 * @param {string | Buffer | URL} path
 * @param {string | object} [options]
 * @returns {WriteStream}
 */
function createWriteStream(path, options) {
  // Ensure the stream classes are loaded before constructing one.
  lazyLoadStreams();
  const stream = new WriteStream(path, options);
  return stream;
}
+
// Resolves the Glob class lazily, on first call from glob()/globSync().
const lazyGlob = getLazy(() => require("internal/fs/glob").Glob);
+
/**
 * Asynchronously matches files against the given glob pattern and delivers
 * the collected matches to `callback`.
 * @param {string | string[]} pattern
 * @param {object} [options]
 * @param {(err?: Error, matches?: string[]) => any} callback
 */
function glob(pattern, options, callback) {
  // Allow the options argument to be omitted.
  if (typeof options === "function") {
    callback = options;
    options = undefined;
  }
  callback = makeCallback(callback);

  const Glob = lazyGlob();
  const collected = ArrayFromAsync(new Glob(pattern, options).glob());
  PromisePrototypeThen(collected, (res) => callback(null, res), callback);
}
+
/**
 * Synchronously matches files against the given glob pattern.
 * @param {string | string[]} pattern
 * @param {object} [options]
 * @returns {string[]}
 */
function globSync(pattern, options) {
  const Glob = lazyGlob();
  const matcher = new Glob(pattern, options);
  return matcher.globSync();
}
+
// Public fs API surface. Assigning the same object to the module-level `fs`
// binding lets helpers defined above (e.g. realpath) call back through the
// monkey-patchable exported functions.
module.exports = fs = {
  appendFile,
  appendFileSync,
  access,
  accessSync,
  chown,
  chownSync,
  chmod,
  chmodSync,
  close,
  closeSync,
  copyFile,
  copyFileSync,
  cp,
  cpSync,
  createReadStream,
  createWriteStream,
  exists,
  existsSync,
  fchown,
  fchownSync,
  fchmod,
  fchmodSync,
  fdatasync,
  fdatasyncSync,
  fstat,
  fstatSync,
  fsync,
  fsyncSync,
  ftruncate,
  ftruncateSync,
  futimes,
  futimesSync,
  glob,
  globSync,
  // lchmod is only exposed on platforms that support O_SYMLINK.
  lchmod: constants.O_SYMLINK !== undefined ? lchmod : undefined,
  lchmodSync: constants.O_SYMLINK !== undefined ? lchmodSync : undefined,
  lchown,
  lchownSync,
  link,
  linkSync,
  lstat,
  lstatSync,
  lutimes,
  lutimesSync,
  mkdir,
  mkdirSync,
  mkdtemp,
  mkdtempSync,
  mkdtempDisposableSync,
  open,
  openSync,
  openAsBlob,
  readdir,
  readdirSync,
  read,
  readSync,
  readv,
  readvSync,
  readFile,
  readFileSync,
  readlink,
  readlinkSync,
  realpath,
  realpathSync,
  rename,
  renameSync,
  rm,
  rmSync,
  rmdir,
  rmdirSync,
  stat,
  statfs,
  statSync,
  statfsSync,
  symlink,
  symlinkSync,
  truncate,
  truncateSync,
  unwatchFile,
  unlink,
  unlinkSync,
  utimes,
  utimesSync,
  watch,
  watchFile,
  writeFile,
  writeFileSync,
  write,
  writeSync,
  writev,
  writevSync,
  Dirent,
  Stats,

  // The stream classes are loaded lazily; accessor pairs keep them
  // assignable so user code can still replace them.
  get ReadStream() {
    lazyLoadStreams();
    return ReadStream;
  },

  set ReadStream(val) {
    ReadStream = val;
  },

  get WriteStream() {
    lazyLoadStreams();
    return WriteStream;
  },

  set WriteStream(val) {
    WriteStream = val;
  },

  // Legacy names... these have to be separate because of how graceful-fs
  // (and possibly other) modules monkey patch the values.
  get FileReadStream() {
    lazyLoadStreams();
    return FileReadStream;
  },

  set FileReadStream(val) {
    FileReadStream = val;
  },

  get FileWriteStream() {
    lazyLoadStreams();
    return FileWriteStream;
  },

  set FileWriteStream(val) {
    FileWriteStream = val;
  },

  get Utf8Stream() {
    lazyLoadUtf8Stream();
    return Utf8Stream;
  },

  // For tests
  _toUnixTimestamp: toUnixTimestamp,
};

// Dir/opendir/opendirSync are attached lazily from internal/fs/dir.
defineLazyProperties(fs, "internal/fs/dir", ["Dir", "opendir", "opendirSync"]);

ObjectDefineProperties(fs, {
  // fs.constants is frozen in place (non-configurable, non-writable value).
  constants: {
    __proto__: null,
    configurable: false,
    enumerable: true,
    value: constants,
  },
  // fs.promises is resolved lazily on first access and then memoized.
  promises: {
    __proto__: null,
    configurable: true,
    enumerable: true,
    get() {
      promises ??= require("internal/fs/promises").exports;
      return promises;
    },
  },
});
diff --git a/.codesandbox/node/inspector.js b/.codesandbox/node/inspector.js
new file mode 100644
index 0000000..0cc4123
--- /dev/null
+++ b/.codesandbox/node/inspector.js
@@ -0,0 +1,240 @@
+"use strict";
+
+const { JSONParse, JSONStringify, SafeMap, SymbolDispose } = primordials;
+
+const {
+ ERR_INSPECTOR_ALREADY_ACTIVATED,
+ ERR_INSPECTOR_ALREADY_CONNECTED,
+ ERR_INSPECTOR_CLOSED,
+ ERR_INSPECTOR_COMMAND,
+ ERR_INSPECTOR_NOT_AVAILABLE,
+ ERR_INSPECTOR_NOT_CONNECTED,
+ ERR_INSPECTOR_NOT_ACTIVE,
+ ERR_INSPECTOR_NOT_WORKER,
+} = require("internal/errors").codes;
+
+const { isLoopback } = require("internal/net");
+
const { hasInspector } = internalBinding("config");
// Fail fast at require time when Node.js was built without inspector support.
if (!hasInspector) throw new ERR_INSPECTOR_NOT_AVAILABLE();
+
+const EventEmitter = require("events");
+const { queueMicrotask } = require("internal/process/task_queues");
+const { kEmptyObject } = require("internal/util");
+const {
+ isUint32,
+ validateFunction,
+ validateInt32,
+ validateObject,
+ validateString,
+} = require("internal/validators");
+const { isMainThread } = require("worker_threads");
+const { _debugEnd } = internalBinding("process_methods");
+const { put } = require("internal/inspector/network_resources");
+
+const {
+ Connection,
+ MainThreadConnection,
+ open,
+ url,
+ isEnabled,
+ waitForDebugger,
+ console,
+ emitProtocolEvent,
+} = internalBinding("inspector");
+
class Session extends EventEmitter {
  // Active inspector connection, or null while disconnected.
  #conn = null;
  // Monotonically increasing id for outgoing protocol messages.
  #seq = 1;
  // Pending response callbacks, keyed by outgoing message id.
  #pending = new SafeMap();

  /**
   * Connects the session to the inspector back-end.
   * @returns {void}
   */
  connect() {
    if (this.#conn !== null) {
      throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session");
    }
    this.#conn = new Connection((message) => this.#handleMessage(message));
  }

  /**
   * Connects the session to the main thread
   * inspector back-end.
   * @returns {void}
   */
  connectToMainThread() {
    if (isMainThread) throw new ERR_INSPECTOR_NOT_WORKER();
    if (this.#conn !== null) {
      throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session");
    }
    // Messages from another thread are deferred to a microtask so they are
    // handled outside the native callback.
    this.#conn = new MainThreadConnection((message) =>
      queueMicrotask(() => this.#handleMessage(message))
    );
  }

  // Routes one raw protocol message: command responses (with an id) go to
  // their registered callback; notifications are re-emitted as events.
  #handleMessage(message) {
    const payload = JSONParse(message);
    try {
      if (payload.id) {
        const callback = this.#pending.get(payload.id);
        this.#pending.delete(payload.id);
        if (callback) {
          if (payload.error) {
            return callback(
              new ERR_INSPECTOR_COMMAND(payload.error.code, payload.error.message)
            );
          }

          callback(null, payload.result);
        }
      } else {
        this.emit(payload.method, payload);
        this.emit("inspectorNotification", payload);
      }
    } catch (error) {
      // A throwing user listener must not break message dispatch.
      process.emitWarning(error);
    }
  }

  /**
   * Posts a message to the inspector back-end.
   * @param {string} method
   * @param {Record} [params]
   * @param {Function} [callback]
   * @returns {void}
   */
  post(method, params, callback) {
    validateString(method, "method");
    // Allow (method, callback) call shape.
    if (!callback && typeof params === "function") {
      callback = params;
      params = null;
    }
    if (params) validateObject(params, "params");
    if (callback) validateFunction(callback, "callback");

    if (this.#conn === null) {
      throw new ERR_INSPECTOR_NOT_CONNECTED();
    }
    const id = this.#seq++;
    const message = { id, method };
    if (params) message.params = params;
    if (callback) this.#pending.set(id, callback);
    this.#conn.dispatch(JSONStringify(message));
  }

  /**
   * Immediately closes the session; every pending
   * message callback is invoked with an
   * ERR_INSPECTOR_CLOSED error.
   * @returns {void}
   */
  disconnect() {
    if (this.#conn === null) return;
    this.#conn.disconnect();
    this.#conn = null;
    for (const callback of this.#pending.values()) {
      process.nextTick(callback, new ERR_INSPECTOR_CLOSED());
    }
    this.#pending.clear();
    this.#seq = 1;
  }
}
+
/**
 * Activates the inspector on the given host and port, optionally blocking
 * until a debugger attaches. Returns a disposable that deactivates it.
 * @param {number} [port]
 * @param {string} [host]
 * @param {boolean} [wait]
 * @returns {object} disposable handle
 */
function inspectorOpen(port, host, wait) {
  if (isEnabled()) {
    throw new ERR_INSPECTOR_ALREADY_ACTIVATED();
  }

  // inspectorOpen() currently does not typecheck its arguments and adding
  // such checks would be a potentially breaking change. However, the native
  // open() function requires the port to fit into a 16-bit unsigned integer,
  // causing an integer overflow otherwise, so we at least need to prevent that.
  if (isUint32(port)) {
    validateInt32(port, "port", 0, 65535);
  }

  // Warn when binding to anything other than a loopback address.
  if (host && !isLoopback(host)) {
    const warning =
      "Binding the inspector to a public IP with an open port is insecure, " +
      "as it allows external hosts to connect to the inspector " +
      "and perform a remote code execution attack. " +
      "Documentation can be found at " +
      "https://nodejs.org/api/cli.html#--inspecthostport";
    process.emitWarning(warning, "SecurityWarning");
  }

  open(port, host);
  if (wait) waitForDebugger();

  return {
    __proto__: null,
    [SymbolDispose]() {
      _debugEnd();
    },
  };
}
+
/**
 * Blocks until a client (existing or connected later) has sent the
 * `Runtime.runIfWaitingForDebugger` command.
 * @throws {ERR_INSPECTOR_NOT_ACTIVE} when the inspector is not active
 * @returns {void}
 */
function inspectorWaitForDebugger() {
  const active = waitForDebugger();
  if (!active) throw new ERR_INSPECTOR_NOT_ACTIVE();
}
+
// Emits a protocol event to any attached inspector front-end after
// validating both arguments (validation order is part of the error contract).
function broadcastToFrontend(eventName, params = kEmptyObject) {
  validateString(eventName, "eventName");
  validateObject(params, "params");
  emitProtocolEvent(eventName, params);
}
+
// Thin wrappers that broadcast Chrome DevTools Protocol `Network.*` events
// to the inspector front-end; each forwards its params unchanged.
const Network = {
  requestWillBeSent: (params) =>
    broadcastToFrontend("Network.requestWillBeSent", params),
  responseReceived: (params) =>
    broadcastToFrontend("Network.responseReceived", params),
  loadingFinished: (params) =>
    broadcastToFrontend("Network.loadingFinished", params),
  loadingFailed: (params) =>
    broadcastToFrontend("Network.loadingFailed", params),
  dataSent: (params) => broadcastToFrontend("Network.dataSent", params),
  dataReceived: (params) => broadcastToFrontend("Network.dataReceived", params),
  webSocketCreated: (params) =>
    broadcastToFrontend("Network.webSocketCreated", params),
  webSocketClosed: (params) =>
    broadcastToFrontend("Network.webSocketClosed", params),
  webSocketHandshakeResponseReceived: (params) =>
    broadcastToFrontend("Network.webSocketHandshakeResponseReceived", params),
};
+
// Publishes network resources to the inspector; `put` comes from
// internal/inspector/network_resources (required above).
const NetworkResources = {
  put,
};

module.exports = {
  open: inspectorOpen,
  close: _debugEnd,
  url,
  waitForDebugger: inspectorWaitForDebugger,
  console,
  Session,
  Network,
  NetworkResources,
};
diff --git a/.codesandbox/node/net.js b/.codesandbox/node/net.js
new file mode 100644
index 0000000..c6ff6ce
--- /dev/null
+++ b/.codesandbox/node/net.js
@@ -0,0 +1,2480 @@
+'use strict';
+
+const {
+ ArrayIsArray,
+ ArrayPrototypeIncludes,
+ ArrayPrototypeIndexOf,
+ ArrayPrototypePush,
+ Boolean,
+ FunctionPrototypeBind,
+ FunctionPrototypeCall,
+ MathMax,
+ Number,
+ NumberIsNaN,
+ NumberParseInt,
+ ObjectDefineProperty,
+ ObjectSetPrototypeOf,
+ Symbol,
+ SymbolAsyncDispose,
+ SymbolDispose,
+} = primordials;
+
+const EventEmitter = require('events');
+const { addAbortListener } = require('internal/events/abort_listener');
+const stream = require('stream');
+let debug = require('internal/util/debuglog').debuglog('net', (fn) => {
+ debug = fn;
+});
+const {
+ kReinitializeHandle,
+ isIP,
+ isIPv4,
+ isIPv6,
+ normalizedArgsSymbol,
+ makeSyncWrite,
+} = require('internal/net');
+const assert = require('internal/assert');
+const {
+ UV_EADDRINUSE,
+ UV_EINVAL,
+ UV_ENOTCONN,
+ UV_ECANCELED,
+ UV_ETIMEDOUT,
+} = internalBinding('uv');
+const { convertIpv6StringToBuffer } = internalBinding('cares_wrap');
+
+const { Buffer } = require('buffer');
+const { ShutdownWrap } = internalBinding('stream_wrap');
+const {
+ TCP,
+ TCPConnectWrap,
+ constants: TCPConstants,
+} = internalBinding('tcp_wrap');
+const {
+ Pipe,
+ PipeConnectWrap,
+ constants: PipeConstants,
+} = internalBinding('pipe_wrap');
+const {
+ newAsyncId,
+ defaultTriggerAsyncIdScope,
+ symbols: { async_id_symbol, owner_symbol },
+} = require('internal/async_hooks');
+const {
+ writevGeneric,
+ writeGeneric,
+ onStreamRead,
+ kAfterAsyncWrite,
+ kHandle,
+ kUpdateTimer,
+ setStreamTimeout,
+ kBuffer,
+ kBufferCb,
+ kBufferGen,
+} = require('internal/stream_base_commons');
+const {
+ ErrnoException,
+ ExceptionWithHostPort,
+ NodeAggregateError,
+ UVExceptionWithHostPort,
+ codes: {
+ ERR_INVALID_ADDRESS_FAMILY,
+ ERR_INVALID_ARG_TYPE,
+ ERR_INVALID_ARG_VALUE,
+ ERR_INVALID_FD_TYPE,
+ ERR_INVALID_HANDLE_TYPE,
+ ERR_INVALID_IP_ADDRESS,
+ ERR_IP_BLOCKED,
+ ERR_MISSING_ARGS,
+ ERR_SERVER_ALREADY_LISTEN,
+ ERR_SERVER_NOT_RUNNING,
+ ERR_SOCKET_CLOSED,
+ ERR_SOCKET_CLOSED_BEFORE_CONNECTION,
+ ERR_SOCKET_CONNECTION_TIMEOUT,
+ },
+ genericNodeError,
+} = require('internal/errors');
+const { isUint8Array } = require('internal/util/types');
+const { queueMicrotask } = require('internal/process/task_queues');
+const {
+ guessHandleType,
+ isWindows,
+ kEmptyObject,
+ promisify,
+} = require('internal/util');
+const {
+ validateAbortSignal,
+ validateBoolean,
+ validateFunction,
+ validateInt32,
+ validateNumber,
+ validatePort,
+ validateString,
+} = require('internal/validators');
+const kLastWriteQueueSize = Symbol('lastWriteQueueSize');
+const { getOptionValue } = require('internal/options');
+
+// Lazy loaded to improve startup performance.
+let cluster;
+let dns;
+let BlockList;
+let SocketAddress;
+let autoSelectFamilyDefault = getOptionValue('--network-family-autoselection');
+let autoSelectFamilyAttemptTimeoutDefault = getOptionValue('--network-family-autoselection-attempt-timeout');
+
+const { clearTimeout, setTimeout } = require('timers');
+const { kTimeout } = require('internal/timers');
+
+const DEFAULT_IPV4_ADDR = '0.0.0.0';
+const DEFAULT_IPV6_ADDR = '::';
+
+const noop = () => {};
+
+const kPerfHooksNetConnectContext = Symbol('kPerfHooksNetConnectContext');
+
+const dc = require('diagnostics_channel');
+const netClientSocketChannel = dc.channel('net.client.socket');
+const netServerSocketChannel = dc.channel('net.server.socket');
+const netServerListen = dc.tracingChannel('net.server.listen');
+
+const {
+ hasObserver,
+ startPerf,
+ stopPerf,
+} = require('internal/perf/observe');
+const { getDefaultHighWaterMark } = require('internal/streams/state');
+
+// Translates server/bind options into the libuv TCP flag bitmask.
+// Only `ipv6Only` and `reusePort` are honored; anything else is ignored.
+function getFlags(options) {
+  let flags = 0;
+  if (options.ipv6Only === true) {
+    flags |= TCPConstants.UV_TCP_IPV6ONLY;
+  }
+  if (options.reusePort === true) {
+    flags |= TCPConstants.UV_TCP_REUSEPORT;
+  }
+  return flags;
+}
+
+// Wraps an existing file descriptor in the matching libuv handle type.
+// `fd` must be a non-negative int32; `is_server` selects the SERVER vs
+// SOCKET handle mode. Throws ERR_INVALID_FD_TYPE for fds that are
+// neither pipes nor TCP sockets (e.g. files, UDP).
+function createHandle(fd, is_server) {
+  validateInt32(fd, 'fd', 0);
+  const type = guessHandleType(fd);
+  if (type === 'PIPE') {
+    return new Pipe(
+      is_server ? PipeConstants.SERVER : PipeConstants.SOCKET,
+    );
+  }
+
+  if (type === 'TCP') {
+    return new TCP(
+      is_server ? TCPConstants.SERVER : TCPConstants.SOCKET,
+    );
+  }
+
+  throw new ERR_INVALID_FD_TYPE(type);
+}
+
+
+// Returns the async id of `handle` when it can provide one; otherwise
+// allocates a fresh id (covers user-supplied handles without getAsyncId).
+function getNewAsyncId(handle) {
+  return (!handle || typeof handle.getAsyncId !== 'function') ?
+    newAsyncId() : handle.getAsyncId();
+}
+
+
+// A string that does not parse as a number is treated as an IPC pipe path.
+function isPipeName(s) {
+  return typeof s === 'string' && toNumber(s) === false;
+}
+
+/**
+ * Creates a new TCP or IPC server
+ * @param {{
+ * allowHalfOpen?: boolean;
+ * pauseOnConnect?: boolean;
+ * }} [options]
+ * @param {Function} [connectionListener]
+ * @returns {Server}
+ */
+
+function createServer(options, connectionListener) {
+  // Thin factory; all option validation happens in the Server constructor.
+  return new Server(options, connectionListener);
+}
+
+
+// Target API:
+//
+// let s = net.connect({port: 80, host: 'google.com'}, function() {
+// ...
+// });
+//
+// There are various forms:
+//
+// connect(options, [cb])
+// connect(port, [host], [cb])
+// connect(path, [cb]);
+//
+// Module-level net.connect()/net.createConnection(). Accepts
+// (options[, cb]) | (port[, host][, cb]) | (path[, cb]); normalizeArgs()
+// reduces every form to a tagged [options, callback] pair.
+function connect(...args) {
+  const normalized = normalizeArgs(args);
+  const options = normalized[0];
+  debug('createConnection', normalized);
+  const socket = new Socket(options);
+
+  if (options.timeout) {
+    socket.setTimeout(options.timeout);
+  }
+
+  // Socket.prototype.connect recognizes the normalized-array form and
+  // will not re-normalize it.
+  return socket.connect(normalized);
+}
+
+// Accessors for the process-wide "happy eyeballs" defaults, initialized
+// from the --network-family-autoselection* CLI options above.
+function getDefaultAutoSelectFamily() {
+  return autoSelectFamilyDefault;
+}
+
+function setDefaultAutoSelectFamily(value) {
+  validateBoolean(value, 'value');
+  autoSelectFamilyDefault = value;
+}
+
+function getDefaultAutoSelectFamilyAttemptTimeout() {
+  return autoSelectFamilyAttemptTimeoutDefault;
+}
+
+function setDefaultAutoSelectFamilyAttemptTimeout(value) {
+  validateInt32(value, 'value', 1);
+
+  // Values below 10 ms are silently clamped rather than rejected.
+  if (value < 10) {
+    value = 10;
+  }
+
+  autoSelectFamilyAttemptTimeoutDefault = value;
+}
+
+// Returns an array [options, cb], where options is an object,
+// cb is either a function or null.
+// Used to normalize arguments of Socket.prototype.connect() and
+// Server.prototype.listen(). Possible combinations of parameters:
+// (options[...][, cb])
+// (path[...][, cb])
+// ([port][, host][...][, cb])
+// For Socket.prototype.connect(), the [...] part is ignored
+// For Server.prototype.listen(), the [...] part is [, backlog]
+// but will not be handled here (handled in listen())
+function normalizeArgs(args) {
+  let arr;
+
+  if (args.length === 0) {
+    arr = [{}, null];
+    arr[normalizedArgsSymbol] = true;
+    return arr;
+  }
+
+  const arg0 = args[0];
+  let options = {};
+  if (typeof arg0 === 'object' && arg0 !== null) {
+    // (options[...][, cb])
+    options = arg0;
+  } else if (isPipeName(arg0)) {
+    // (path[...][, cb])
+    options.path = arg0;
+  } else {
+    // ([port][, host][...][, cb])
+    options.port = arg0;
+    if (args.length > 1 && typeof args[1] === 'string') {
+      options.host = args[1];
+    }
+  }
+
+  // A trailing function argument is the callback; otherwise none was given.
+  const cb = args[args.length - 1];
+  if (typeof cb !== 'function')
+    arr = [options, null];
+  else
+    arr = [options, cb];
+
+  // Tag the array so connect()/listen() can skip re-normalizing it.
+  arr[normalizedArgsSymbol] = true;
+  return arr;
+}
+
+
+// Called when creating new Socket, or when re-using a closed Socket
+// Called when creating new Socket, or when re-using a closed Socket
+function initSocketHandle(self) {
+  self._undestroy();
+  self._sockname = null;
+
+  // Handle creation may be deferred to bind() or connect() time.
+  if (self._handle) {
+    self._handle[owner_symbol] = self;
+    self._handle.onread = onStreamRead;
+    self[async_id_symbol] = getNewAsyncId(self._handle);
+
+    // `onread` option support: a static buffer or a buffer-generator
+    // function was stashed on the socket by the constructor.
+    let userBuf = self[kBuffer];
+    if (userBuf) {
+      const bufGen = self[kBufferGen];
+      if (bufGen !== null) {
+        userBuf = bufGen();
+        // A generator returning a non-Uint8Array disables the user buffer.
+        if (!isUint8Array(userBuf))
+          return;
+        self[kBuffer] = userBuf;
+      }
+      self._handle.useUserBuffer(userBuf);
+    }
+  }
+}
+
+// Closes the libuv handle and emits 'close' once the close completes.
+// When `isCleanupPending` the handle fields are detached in the close
+// callback (deferred teardown used by the error-handling microtask path).
+function closeSocketHandle(self, isException, isCleanupPending = false) {
+  if (self._handle) {
+    self._handle.close(() => {
+      debug('emit close');
+      self.emit('close', isException);
+      if (isCleanupPending) {
+        self._handle.onread = noop;
+        self._handle = null;
+        self._sockname = null;
+      }
+    });
+  }
+}
+
+// Private per-socket state: byte counters snapshotted at destroy() time,
+// plus the last values passed to setNoDelay()/setKeepAlive() so they can
+// be replayed when a handle is (re)attached.
+const kBytesRead = Symbol('kBytesRead');
+const kBytesWritten = Symbol('kBytesWritten');
+const kSetNoDelay = Symbol('kSetNoDelay');
+const kSetKeepAlive = Symbol('kSetKeepAlive');
+const kSetKeepAliveInitialDelay = Symbol('kSetKeepAliveInitialDelay');
+
+// Duplex stream over a TCP or pipe handle. Accepts either an options
+// object, a raw fd (legacy numeric form), or an existing handle.
+function Socket(options) {
+  if (!(this instanceof Socket)) return new Socket(options);
+  // Object-mode duplexes are explicitly unsupported on sockets.
+  if (options?.objectMode) {
+    throw new ERR_INVALID_ARG_VALUE(
+      'options.objectMode',
+      options.objectMode,
+      'is not supported',
+    );
+  } else if (options?.readableObjectMode || options?.writableObjectMode) {
+    throw new ERR_INVALID_ARG_VALUE(
+      `options.${
+        options.readableObjectMode ? 'readableObjectMode' : 'writableObjectMode'
+      }`,
+      options.readableObjectMode || options.writableObjectMode,
+      'is not supported',
+    );
+  }
+  if (options?.keepAliveInitialDelay !== undefined) {
+    validateNumber(
+      options?.keepAliveInitialDelay, 'options.keepAliveInitialDelay',
+    );
+
+    // Negative delays are clamped to 0 rather than rejected.
+    if (options.keepAliveInitialDelay < 0) {
+      options.keepAliveInitialDelay = 0;
+    }
+  }
+
+  this.connecting = false;
+  // Problem with this is that users can supply their own handle, that may not
+  // have _handle.getAsyncId(). In this case an [async_id_symbol] should
+  // probably be supplied by async_hooks.
+  this[async_id_symbol] = -1;
+  this._hadError = false;
+  this[kHandle] = null;
+  this._parent = null;
+  this._host = null;
+  this[kLastWriteQueueSize] = 0;
+  this[kTimeout] = null;
+  this[kBuffer] = null;
+  this[kBufferCb] = null;
+  this[kBufferGen] = null;
+  this._closeAfterHandlingError = false;
+
+  if (typeof options === 'number')
+    options = { fd: options }; // Legacy interface.
+  else
+    options = { ...options };
+
+  // Default to *not* allowing half open sockets.
+  options.allowHalfOpen = Boolean(options.allowHalfOpen);
+  // For backwards compat do not emit close on destroy.
+  options.emitClose = false;
+  options.autoDestroy = true;
+  // Handle strings directly.
+  options.decodeStrings = false;
+  stream.Duplex.call(this, options);
+
+  if (options.handle) {
+    this._handle = options.handle; // private
+    this[async_id_symbol] = getNewAsyncId(this._handle);
+  } else if (options.fd !== undefined) {
+    const { fd } = options;
+    let err;
+
+    // createHandle will throw ERR_INVALID_FD_TYPE if `fd` is not
+    // a valid `PIPE` or `TCP` descriptor
+    this._handle = createHandle(fd, false);
+
+    err = this._handle.open(fd);
+
+    // While difficult to fabricate, in some architectures
+    // `open` may return an error code for valid file descriptors
+    // which cannot be opened. This is difficult to test as most
+    // un-openable fds will throw on `createHandle`
+    if (err)
+      throw new ErrnoException(err, 'open');
+
+    this[async_id_symbol] = this._handle.getAsyncId();
+
+    if ((fd === 1 || fd === 2) &&
+        (this._handle instanceof Pipe) && isWindows) {
+      // Make stdout and stderr blocking on Windows
+      err = this._handle.setBlocking(true);
+      if (err)
+        throw new ErrnoException(err, 'setBlocking');
+
+      this._writev = null;
+      this._write = makeSyncWrite(fd);
+      // makeSyncWrite adjusts this value like the original handle would, so
+      // we need to let it do that by turning it into a writable, own
+      // property.
+      ObjectDefineProperty(this._handle, 'bytesWritten', {
+        __proto__: null,
+        value: 0, writable: true,
+      });
+    }
+  }
+
+  // `onread` option: caller supplies a reuse buffer (or generator) and a
+  // callback that receives read data directly, bypassing stream buffering.
+  const onread = options.onread;
+  if (onread !== null && typeof onread === 'object' &&
+      (isUint8Array(onread.buffer) || typeof onread.buffer === 'function') &&
+      typeof onread.callback === 'function') {
+    if (typeof onread.buffer === 'function') {
+      this[kBuffer] = true;
+      this[kBufferGen] = onread.buffer;
+    } else {
+      this[kBuffer] = onread.buffer;
+    }
+    this[kBufferCb] = onread.callback;
+  }
+
+  this[kSetNoDelay] = Boolean(options.noDelay);
+  this[kSetKeepAlive] = Boolean(options.keepAlive);
+  // Keep-alive delay is tracked in whole seconds (~~ also maps NaN to 0).
+  this[kSetKeepAliveInitialDelay] = ~~(options.keepAliveInitialDelay / 1000);
+
+  // Shut down the socket when we're finished with it.
+  this.on('end', onReadableStreamEnd);
+
+  initSocketHandle(this);
+
+  this._pendingData = null;
+  this._pendingEncoding = '';
+
+  // If we have a handle, then start the flow of data into the
+  // buffer. if not, then this will happen when we connect
+  if (this._handle && options.readable !== false) {
+    if (options.pauseOnCreate) {
+      // Stop the handle from reading and pause the stream
+      this._handle.reading = false;
+      this._handle.readStop();
+      this.readableFlowing = false;
+    } else if (!options.manualStart) {
+      this.read(0);
+    }
+  }
+
+  if (options.signal) {
+    addClientAbortSignalOption(this, options);
+  }
+
+  // Reserve properties
+  this.server = null;
+  this._server = null;
+
+  // Used after `.destroy()`
+  this[kBytesRead] = 0;
+  this[kBytesWritten] = 0;
+  if (options.blockList) {
+    if (!module.exports.BlockList.isBlockList(options.blockList)) {
+      throw new ERR_INVALID_ARG_TYPE('options.blockList', 'net.BlockList', options.blockList);
+    }
+    this.blockList = options.blockList;
+  }
+}
+// Socket inherits from stream.Duplex (both instance and static sides).
+ObjectSetPrototypeOf(Socket.prototype, stream.Duplex.prototype);
+ObjectSetPrototypeOf(Socket, stream.Duplex);
+
+// Refresh existing timeouts.
+Socket.prototype._unrefTimer = function _unrefTimer() {
+  // Walk the parent chain (e.g. TLS sockets wrapping a net socket) so
+  // activity on this socket keeps every ancestor's timeout alive too.
+  for (let s = this; s !== null; s = s._parent) {
+    if (s[kTimeout])
+      s[kTimeout].refresh();
+  }
+};
+
+
+// The user has called .end(), and all the bytes have been
+// sent out to the other side.
+Socket.prototype._final = function(cb) {
+  // If still connecting - defer handling `_final` until 'connect' will happen
+  if (this.connecting) {
+    debug('_final: not yet connected');
+    return this.once('connect', () => this._final(cb));
+  }
+
+  // No handle means the socket is already gone; nothing to shut down.
+  if (!this._handle)
+    return cb();
+
+  debug('_final: not ended, call shutdown()');
+
+  const req = new ShutdownWrap();
+  req.oncomplete = afterShutdown;
+  req.handle = this._handle;
+  req.callback = cb;
+  const err = this._handle.shutdown(req);
+
+  // 1 means the shutdown completed synchronously; UV_ENOTCONN means the
+  // peer is already gone — both count as a finished shutdown.
+  if (err === 1 || err === UV_ENOTCONN) // synchronous finish
+    return cb();
+  else if (err !== 0)
+    return cb(new ErrnoException(err, 'shutdown'));
+};
+
+// ShutdownWrap oncomplete: invoked with `this` bound to the request.
+function afterShutdown() {
+  const self = this.handle[owner_symbol];
+
+  debug('afterShutdown destroyed=%j', self.destroyed);
+
+  this.callback();
+}
+
+// Provide a better error message when we call end() as a result
+// of the other side sending a FIN. The standard 'write after end'
+// is overly vague, and makes it seem like the user's code is to blame.
+// Provide a better error message when we call end() as a result
+// of the other side sending a FIN. The standard 'write after end'
+// is overly vague, and makes it seem like the user's code is to blame.
+// Installed as `this.write` by onReadableStreamEnd() when allowHalfOpen
+// is false.
+function writeAfterFIN(chunk, encoding, cb) {
+  if (!this.writableEnded) {
+    return stream.Duplex.prototype.write.call(this, chunk, encoding, cb);
+  }
+
+  // write(chunk, cb) form: shift arguments.
+  if (typeof encoding === 'function') {
+    cb = encoding;
+    encoding = null;
+  }
+
+  const er = genericNodeError(
+    'This socket has been ended by the other party',
+    { code: 'EPIPE' },
+  );
+  // Deliver the error asynchronously, attributed to this socket's async id.
+  if (typeof cb === 'function') {
+    defaultTriggerAsyncIdScope(this[async_id_symbol], process.nextTick, cb, er);
+  }
+  this.destroy(er);
+
+  return false;
+}
+
+// Shared timeout plumbing from internal/stream_base_commons.
+Socket.prototype.setTimeout = setStreamTimeout;
+
+
+Socket.prototype._onTimeout = function() {
+  const handle = this._handle;
+  const lastWriteQueueSize = this[kLastWriteQueueSize];
+  if (lastWriteQueueSize > 0 && handle) {
+    // `lastWriteQueueSize !== writeQueueSize` means there is
+    // an active write in progress, so we suppress the timeout.
+    const { writeQueueSize } = handle;
+    if (lastWriteQueueSize !== writeQueueSize) {
+      this[kLastWriteQueueSize] = writeQueueSize;
+      this._unrefTimer();
+      return;
+    }
+  }
+  debug('_onTimeout');
+  // 'timeout' is advisory; listeners decide whether to destroy the socket.
+  this.emit('timeout');
+};
+
+
+Socket.prototype.setNoDelay = function(enable) {
+  // Backwards compatibility: assume true when `enable` is omitted
+  enable = Boolean(enable === undefined ? true : enable);
+
+  // No handle yet: remember the setting so initSocketHandle/connect can
+  // apply it once the handle exists.
+  if (!this._handle) {
+    this[kSetNoDelay] = enable;
+    return this;
+  }
+
+  // Only hit the syscall when the value actually changes.
+  if (this._handle.setNoDelay && enable !== this[kSetNoDelay]) {
+    this[kSetNoDelay] = enable;
+    this._handle.setNoDelay(enable);
+  }
+
+  return this;
+};
+
+
+Socket.prototype.setKeepAlive = function(enable, initialDelayMsecs) {
+  enable = Boolean(enable);
+  // libuv takes the delay in whole seconds (~~ also maps NaN to 0).
+  const initialDelay = ~~(initialDelayMsecs / 1000);
+
+  // No handle yet: stash the settings to be replayed on (re)connect.
+  if (!this._handle) {
+    this[kSetKeepAlive] = enable;
+    this[kSetKeepAliveInitialDelay] = initialDelay;
+    return this;
+  }
+
+  // Pipe handles have no setKeepAlive; silently no-op for them.
+  if (!this._handle.setKeepAlive) {
+    return this;
+  }
+
+  // Only call into libuv when the enable flag flips, or when keep-alive
+  // is on and the delay changed.
+  if (enable !== this[kSetKeepAlive] ||
+      (
+        enable &&
+        this[kSetKeepAliveInitialDelay] !== initialDelay
+      )
+  ) {
+    this[kSetKeepAlive] = enable;
+    this[kSetKeepAliveInitialDelay] = initialDelay;
+    this._handle.setKeepAlive(enable, initialDelay);
+  }
+
+  return this;
+};
+
+
+// Local address info ({ address, family, port }); empty object when the
+// handle is absent or has no getsockname.
+Socket.prototype.address = function() {
+  return this._getsockname();
+};
+
+
+// Legacy alias for `connecting`, kept for backwards compatibility.
+ObjectDefineProperty(Socket.prototype, '_connecting', {
+  __proto__: null,
+  get: function() {
+    return this.connecting;
+  },
+});
+
+// True while the socket has no handle yet or is still connecting.
+ObjectDefineProperty(Socket.prototype, 'pending', {
+  __proto__: null,
+  get() {
+    return !this._handle || this.connecting;
+  },
+  configurable: true,
+});
+
+
+ObjectDefineProperty(Socket.prototype, 'readyState', {
+  __proto__: null,
+  get: function() {
+    if (this.connecting) {
+      return 'opening';
+    } else if (this.readable && this.writable) {
+      return 'open';
+    } else if (this.readable && !this.writable) {
+      return 'readOnly';
+    } else if (!this.readable && this.writable) {
+      return 'writeOnly';
+    }
+    return 'closed';
+  },
+});
+
+
+// Bytes currently queued for writing; undefined once the handle is gone.
+ObjectDefineProperty(Socket.prototype, 'bufferSize', {
+  __proto__: null,
+  get: function() {
+    if (this._handle) {
+      return this.writableLength;
+    }
+  },
+});
+
+// Lets stream_base_commons refresh this socket's timers on activity.
+ObjectDefineProperty(Socket.prototype, kUpdateTimer, {
+  __proto__: null,
+  get: function() {
+    return this._unrefTimer;
+  },
+});
+
+
+// Starts the libuv read loop on the handle; destroys the socket with an
+// ErrnoException if readStart() fails.
+function tryReadStart(socket) {
+  // Not already reading, start the flow
+  debug('Socket._handle.readStart');
+  socket._handle.reading = true;
+  const err = socket._handle.readStart();
+  if (err)
+    socket.destroy(new ErrnoException(err, 'read'));
+}
+
+// Just call handle.readStart until we have enough in the buffer
+Socket.prototype._read = function(n) {
+  debug(
+    '_read - n', n,
+    'isConnecting?', !!this.connecting,
+    'hasHandle?', !!this._handle,
+  );
+
+  if (this.connecting || !this._handle) {
+    debug('_read wait for connection');
+    this.once('connect', () => this._read(n));
+  } else if (!this._handle.reading) {
+    tryReadStart(this);
+  }
+};
+
+
+// Duplex.end() with `this` returned for chaining.
+Socket.prototype.end = function(data, encoding, callback) {
+  stream.Duplex.prototype.end.call(this,
+                                   data, encoding, callback);
+  return this;
+};
+
+// Sends a TCP RST and destroys the socket. Only valid for TCP handles;
+// throws ERR_INVALID_HANDLE_TYPE for pipes.
+Socket.prototype.resetAndDestroy = function() {
+  if (this._handle) {
+    if (!(this._handle instanceof TCP))
+      throw new ERR_INVALID_HANDLE_TYPE();
+    if (this.connecting) {
+      debug('reset wait for connection');
+      this.once('connect', () => this._reset());
+    } else {
+      this._reset();
+    }
+  } else {
+    this.destroy(new ERR_SOCKET_CLOSED());
+  }
+  return this;
+};
+
+// pause/resume/read are overridden only to drive the handle-level read
+// loop when the `onread` option (kBuffer) bypasses stream buffering.
+Socket.prototype.pause = function() {
+  if (this[kBuffer] && !this.connecting && this._handle?.reading) {
+    this._handle.reading = false;
+    if (!this.destroyed) {
+      const err = this._handle.readStop();
+      if (err)
+        this.destroy(new ErrnoException(err, 'read'));
+    }
+  }
+  return stream.Duplex.prototype.pause.call(this);
+};
+
+
+Socket.prototype.resume = function() {
+  if (this[kBuffer] && !this.connecting && this._handle &&
+      !this._handle.reading) {
+    tryReadStart(this);
+  }
+  return stream.Duplex.prototype.resume.call(this);
+};
+
+
+Socket.prototype.read = function(n) {
+  if (this[kBuffer] && !this.connecting && this._handle &&
+      !this._handle.reading) {
+    tryReadStart(this);
+  }
+  return stream.Duplex.prototype.read.call(this, n);
+};
+
+
+// Called when the 'end' event is emitted.
+function onReadableStreamEnd() {
+  // Once the peer has sent FIN and half-open is not allowed, turn
+  // further writes into a descriptive EPIPE error (see writeAfterFIN).
+  if (!this.allowHalfOpen) {
+    this.write = writeAfterFIN;
+  }
+}
+
+
+// Gracefully end the writable side, then destroy once all pending
+// writes have flushed.
+Socket.prototype.destroySoon = function() {
+  if (this.writable)
+    this.end();
+
+  if (this.writableFinished)
+    this.destroy();
+  else
+    this.once('finish', this.destroy);
+};
+
+
+// Stream teardown hook. Snapshots byte counters, closes (or RST-resets)
+// the handle, and notifies an owning server so it can update its
+// connection count. Statement order here is load-bearing.
+Socket.prototype._destroy = function(exception, cb) {
+  debug('destroy');
+
+  this.connecting = false;
+
+  // Clear timeouts on this socket and every wrapping parent.
+  for (let s = this; s !== null; s = s._parent) {
+    clearTimeout(s[kTimeout]);
+  }
+
+  debug('close');
+  if (this._handle) {
+    if (this !== process.stderr)
+      debug('close handle');
+    const isException = exception ? true : false;
+    // `bytesRead` and `kBytesWritten` should be accessible after `.destroy()`
+    this[kBytesRead] = this._handle.bytesRead;
+    this[kBytesWritten] = this._handle.bytesWritten;
+
+    if (this.resetAndClosing) {
+      // resetAndDestroy() path: send RST instead of a normal close.
+      this.resetAndClosing = false;
+      const err = this._handle.reset(() => {
+        debug('emit close');
+        this.emit('close', isException);
+      });
+      if (err)
+        this.emit('error', new ErrnoException(err, 'reset'));
+    } else if (this._closeAfterHandlingError) {
+      // Enqueue closing the socket as a microtask, so that the socket can be
+      // accessible when an `error` event is handled in the `next tick queue`.
+      queueMicrotask(() => closeSocketHandle(this, isException, true));
+    } else {
+      closeSocketHandle(this, isException);
+    }
+
+    // In the deferred path the microtask detaches the handle fields itself.
+    if (!this._closeAfterHandlingError) {
+      this._handle.onread = noop;
+      this._handle = null;
+      this._sockname = null;
+    }
+    cb(exception);
+  } else {
+    cb(exception);
+    process.nextTick(emitCloseNT, this);
+  }
+
+  if (this._server) {
+    debug('has server');
+    this._server._connections--;
+    if (this._server._emitCloseIfDrained) {
+      this._server._emitCloseIfDrained();
+    }
+  }
+};
+
+// Marks the socket for RST-on-destroy; _destroy() checks resetAndClosing.
+Socket.prototype._reset = function() {
+  debug('reset connection');
+  this.resetAndClosing = true;
+  return this.destroy();
+};
+
+// Cached peer address info; {} while unavailable (no handle, still
+// connecting, or getpeername failed).
+Socket.prototype._getpeername = function() {
+  if (!this._handle || !this._handle.getpeername || this.connecting) {
+    return this._peername || {};
+  } else if (!this._peername) {
+    const out = {};
+    const err = this._handle.getpeername(out);
+    // On error the (empty) out object is returned without being cached.
+    if (err) return out;
+    this._peername = out;
+  }
+  return this._peername;
+};
+
+// Defines a read-only enumerable getter on Socket.prototype.
+function protoGetter(name, callback) {
+  ObjectDefineProperty(Socket.prototype, name, {
+    __proto__: null,
+    configurable: false,
+    enumerable: true,
+    get: callback,
+  });
+}
+
+// Live counter from the handle while it exists; snapshot afterwards.
+protoGetter('bytesRead', function bytesRead() {
+  return this._handle ? this._handle.bytesRead : this[kBytesRead];
+});
+
+protoGetter('remoteAddress', function remoteAddress() {
+  return this._getpeername().address;
+});
+
+protoGetter('remoteFamily', function remoteFamily() {
+  return this._getpeername().family;
+});
+
+protoGetter('remotePort', function remotePort() {
+  return this._getpeername().port;
+});
+
+
+// Cached local address info; {} when the handle cannot report one.
+Socket.prototype._getsockname = function() {
+  if (!this._handle || !this._handle.getsockname) {
+    return {};
+  } else if (!this._sockname) {
+    this._sockname = {};
+    // FIXME(bnoordhuis) Throw when the return value is not 0?
+    this._handle.getsockname(this._sockname);
+  }
+  return this._sockname;
+};
+
+
+protoGetter('localAddress', function localAddress() {
+  return this._getsockname().address;
+});
+
+
+protoGetter('localPort', function localPort() {
+  return this._getsockname().port;
+});
+
+protoGetter('localFamily', function localFamily() {
+  return this._getsockname().family;
+});
+
+// A completed async write resets the queue-size tracking used by
+// _onTimeout to detect in-flight writes.
+Socket.prototype[kAfterAsyncWrite] = function() {
+  this[kLastWriteQueueSize] = 0;
+};
+
+// Common implementation behind _write/_writev: buffers while connecting,
+// then dispatches through writeGeneric/writevGeneric.
+Socket.prototype._writeGeneric = function(writev, data, encoding, cb) {
+  // If we are still connecting, then buffer this for later.
+  // The Writable logic will buffer up any more writes while
+  // waiting for this one to be done.
+  if (this.connecting) {
+    this._pendingData = data;
+    this._pendingEncoding = encoding;
+    this.once('connect', function connect() {
+      this.off('close', onClose);
+      this._writeGeneric(writev, data, encoding, cb);
+    });
+    // If the socket closes before the connection completes, fail the
+    // write instead of leaving the callback dangling.
+    function onClose() {
+      cb(new ERR_SOCKET_CLOSED_BEFORE_CONNECTION());
+    }
+    this.once('close', onClose);
+    return;
+  }
+  this._pendingData = null;
+  this._pendingEncoding = '';
+
+  if (!this._handle) {
+    cb(new ERR_SOCKET_CLOSED());
+    return false;
+  }
+
+  this._unrefTimer();
+
+  let req;
+  if (writev)
+    req = writevGeneric(this, data, cb);
+  else
+    req = writeGeneric(this, data, encoding, cb);
+  // Track queued bytes for async writes so _onTimeout can tell whether a
+  // write is still in flight.
+  if (req.async)
+    this[kLastWriteQueueSize] = req.bytes;
+};
+
+
+Socket.prototype._writev = function(chunks, cb) {
+  this._writeGeneric(true, chunks, '', cb);
+};
+
+
+Socket.prototype._write = function(data, encoding, cb) {
+  this._writeGeneric(false, data, encoding, cb);
+};
+
+
+// Legacy alias. Having this is probably being overly cautious, but it doesn't
+// really hurt anyone either. This can probably be removed safely if desired.
+protoGetter('_bytesDispatched', function _bytesDispatched() {
+  return this._handle ? this._handle.bytesWritten : this[kBytesWritten];
+});
+
+// Total of bytes handed to the OS plus everything still queued in the
+// stream buffer and any write pending on connect.
+protoGetter('bytesWritten', function bytesWritten() {
+  let bytes = this._bytesDispatched;
+  const data = this._pendingData;
+  const encoding = this._pendingEncoding;
+  const writableBuffer = this.writableBuffer;
+
+  if (!writableBuffer)
+    return undefined;
+
+  // Bytes still sitting in the stream's internal write queue.
+  for (const el of writableBuffer) {
+    bytes += el.chunk instanceof Buffer ?
+      el.chunk.length :
+      Buffer.byteLength(el.chunk, el.encoding);
+  }
+
+  if (ArrayIsArray(data)) {
+    // Was a writev, iterate over chunks to get total length
+    for (let i = 0; i < data.length; i++) {
+      const chunk = data[i];
+
+      if (data.allBuffers || chunk instanceof Buffer)
+        bytes += chunk.length;
+      else
+        bytes += Buffer.byteLength(chunk.chunk, chunk.encoding);
+    }
+  } else if (data) {
+    // Writes are either a string or a Buffer.
+    if (typeof data !== 'string')
+      bytes += data.length;
+    else
+      bytes += Buffer.byteLength(data, encoding);
+  }
+
+  return bytes;
+});
+
+
+// Verifies that a bind() actually bound to the requested port; returns
+// UV_EADDRINUSE when libuv silently fell back to a random port.
+function checkBindError(err, port, handle) {
+  // EADDRINUSE may not be reported until we call listen() or connect().
+  // To complicate matters, a failed bind() followed by listen() or connect()
+  // will implicitly bind to a random port. Ergo, check that the socket is
+  // bound to the expected port before calling listen() or connect().
+  //
+  // FIXME(bnoordhuis) Doesn't work for pipe handles, they don't have a
+  // getsockname() method. Non-issue for now, the cluster module doesn't
+  // really support pipes anyway.
+  if (err === 0 && port > 0 && handle.getsockname) {
+    const out = {};
+    err = handle.getsockname(out);
+    if (err === 0 && port !== out.port) {
+      debug(`checkBindError, bound to ${out.port} instead of ${port}`);
+      err = UV_EADDRINUSE;
+    }
+  }
+  return err;
+}
+
+
+// Single-address connect: optionally binds the local endpoint, applies
+// the socket's block list, then issues the TCP/pipe connect request.
+// Failures destroy the socket with an ExceptionWithHostPort.
+function internalConnect(
+  self, address, port, addressType, localAddress, localPort, flags) {
+  // TODO return promise from Socket.prototype.connect which
+  // wraps _connectReq.
+
+  assert(self.connecting);
+
+  let err;
+
+  if (localAddress || localPort) {
+    if (addressType === 4) {
+      localAddress ||= DEFAULT_IPV4_ADDR;
+      err = self._handle.bind(localAddress, localPort);
+    } else { // addressType === 6
+      localAddress ||= DEFAULT_IPV6_ADDR;
+      err = self._handle.bind6(localAddress, localPort, flags);
+    }
+    debug('connect: binding to localAddress: %s and localPort: %d (addressType: %d)',
+          localAddress, localPort, addressType);
+
+    err = checkBindError(err, localPort, self._handle);
+    if (err) {
+      const ex = new ExceptionWithHostPort(err, 'bind', localAddress, localPort);
+      self.destroy(ex);
+      return;
+    }
+  }
+
+  debug('connect: attempting to connect to %s:%d (addressType: %d)', address, port, addressType);
+  self.emit('connectionAttempt', address, port, addressType);
+
+  if (addressType === 6 || addressType === 4) {
+    // Honor a user-supplied BlockList before dialing out.
+    if (self.blockList?.check(address, `ipv${addressType}`)) {
+      self.destroy(new ERR_IP_BLOCKED(address));
+      return;
+    }
+    const req = new TCPConnectWrap();
+    req.oncomplete = afterConnect;
+    req.address = address;
+    req.port = port;
+    req.localAddress = localAddress;
+    req.localPort = localPort;
+    req.addressType = addressType;
+
+    if (addressType === 4)
+      err = self._handle.connect(req, address, port);
+    else
+      err = self._handle.connect6(req, address, port);
+  } else {
+    // Non-IP address string: treat as a pipe/IPC path.
+    const req = new PipeConnectWrap();
+    req.address = address;
+    req.oncomplete = afterConnect;
+
+    err = self._handle.connect(req, address);
+  }
+
+  if (err) {
+    const sockname = self._getsockname();
+    let details;
+
+    if (sockname) {
+      details = sockname.address + ':' + sockname.port;
+    }
+
+    const ex = new ExceptionWithHostPort(err, 'connect', address, port, details);
+    self.destroy(ex);
+  } else if ((addressType === 6 || addressType === 4) && hasObserver('net')) {
+    // Record a perf_hooks 'net' entry for observers watching connects.
+    startPerf(self, kPerfHooksNetConnectContext, { type: 'net', name: 'connect', detail: { host: address, port } });
+  }
+}
+
+
+// "Happy eyeballs" driver: tries context.addresses one at a time,
+// re-invoking itself on failure or per-attempt timeout until an attempt
+// succeeds or the list is exhausted (then destroys with an aggregate
+// error). `context` carries { socket, addresses, current, errors,
+// localPort, port, flags, timeout, [kTimeout] }.
+function internalConnectMultiple(context, canceled) {
+  clearTimeout(context[kTimeout]);
+  const self = context.socket;
+
+  // We were requested to abort. Stop all operations
+  if (self._aborted) {
+    return;
+  }
+
+  // All connections have been tried without success, destroy with error
+  if (canceled || context.current === context.addresses.length) {
+    if (context.errors.length === 0) {
+      self.destroy(new ERR_SOCKET_CONNECTION_TIMEOUT());
+      return;
+    }
+
+    self.destroy(new NodeAggregateError(context.errors));
+    return;
+  }
+
+  assert(self.connecting);
+
+  const current = context.current++;
+
+  // Every attempt after the first needs a fresh TCP handle; the previous
+  // one was consumed by the failed/timed-out connect.
+  if (current > 0) {
+    self[kReinitializeHandle](new TCP(TCPConstants.SOCKET));
+  }
+
+  const { localPort, port, flags } = context;
+  const { address, family: addressType } = context.addresses[current];
+  let localAddress;
+  let err;
+
+  if (localPort) {
+    if (addressType === 4) {
+      localAddress = DEFAULT_IPV4_ADDR;
+      err = self._handle.bind(localAddress, localPort);
+    } else { // addressType === 6
+      localAddress = DEFAULT_IPV6_ADDR;
+      err = self._handle.bind6(localAddress, localPort, flags);
+    }
+
+    debug('connect/multiple: binding to localAddress: %s and localPort: %d (addressType: %d)',
+          localAddress, localPort, addressType);
+
+    err = checkBindError(err, localPort, self._handle);
+    if (err) {
+      // Record the failure and fall through to the next candidate address.
+      ArrayPrototypePush(context.errors, new ExceptionWithHostPort(err, 'bind', localAddress, localPort));
+      internalConnectMultiple(context);
+      return;
+    }
+  }
+
+  if (self.blockList?.check(address, `ipv${addressType}`)) {
+    const ex = new ERR_IP_BLOCKED(address);
+    ArrayPrototypePush(context.errors, ex);
+    self.emit('connectionAttemptFailed', address, port, addressType, ex);
+    internalConnectMultiple(context);
+    return;
+  }
+
+  debug('connect/multiple: attempting to connect to %s:%d (addressType: %d)', address, port, addressType);
+  self.emit('connectionAttempt', address, port, addressType);
+
+  const req = new TCPConnectWrap();
+  req.oncomplete = FunctionPrototypeBind(afterConnectMultiple, undefined, context, current);
+  req.address = address;
+  req.port = port;
+  req.localAddress = localAddress;
+  req.localPort = localPort;
+  req.addressType = addressType;
+
+  ArrayPrototypePush(self.autoSelectFamilyAttemptedAddresses, `${address}:${port}`);
+
+  if (addressType === 4) {
+    err = self._handle.connect(req, address, port);
+  } else {
+    err = self._handle.connect6(req, address, port);
+  }
+
+  if (err) {
+    const sockname = self._getsockname();
+    let details;
+
+    if (sockname) {
+      details = sockname.address + ':' + sockname.port;
+    }
+
+    const ex = new ExceptionWithHostPort(err, 'connect', address, port, details);
+    ArrayPrototypePush(context.errors, ex);
+
+    self.emit('connectionAttemptFailed', address, port, addressType, ex);
+    internalConnectMultiple(context);
+    return;
+  }
+
+  if (current < context.addresses.length - 1) {
+    debug('connect/multiple: setting the attempt timeout to %d ms', context.timeout);
+
+    // If the attempt has not returned an error, start the connection timer
+    context[kTimeout] = setTimeout(internalConnectMultipleTimeout, context.timeout, context, req, self._handle);
+  }
+}
+
+// Public connect entry point. Normalizes arguments, (re)creates the
+// handle as needed, then defers to internalConnect (pipe path) or
+// lookupAndConnect (host/port path). Returns `this` for chaining.
+Socket.prototype.connect = function(...args) {
+  let normalized;
+  // If passed an array, it's treated as an array of arguments that have
+  // already been normalized (so we don't normalize more than once). This has
+  // been solved before in https://github.com/nodejs/node/pull/12342, but was
+  // reverted as it had unintended side effects.
+  if (ArrayIsArray(args[0]) && args[0][normalizedArgsSymbol]) {
+    normalized = args[0];
+  } else {
+    normalized = normalizeArgs(args);
+  }
+  const options = normalized[0];
+  const cb = normalized[1];
+
+  if (netClientSocketChannel.hasSubscribers) {
+    netClientSocketChannel.publish({
+      socket: this,
+    });
+  }
+
+  if (cb !== null) {
+    this.once('connect', cb);
+  }
+
+  // If the parent is already connecting, do not attempt to connect again
+  if (this._parent?.connecting) {
+    return this;
+  }
+
+  // options.port === null will be checked later.
+  if (options.port === undefined && options.path == null)
+    throw new ERR_MISSING_ARGS(['options', 'port', 'path']);
+
+  // Undo a writeAfterFIN override from a previous connection lifetime.
+  if (this.write !== Socket.prototype.write)
+    this.write = Socket.prototype.write;
+
+  if (this.destroyed) {
+    this._handle = null;
+    this._peername = null;
+    this._sockname = null;
+  }
+
+  const { path } = options;
+  const pipe = !!path;
+  debug('pipe', pipe, path);
+
+  if (!this._handle) {
+    this._handle = pipe ?
+      new Pipe(PipeConstants.SOCKET) :
+      new TCP(TCPConstants.SOCKET);
+    initSocketHandle(this);
+  }
+
+  this._unrefTimer();
+
+  this.connecting = true;
+
+  if (pipe) {
+    validateString(path, 'options.path');
+    // Run within this socket's async id scope for async_hooks attribution.
+    defaultTriggerAsyncIdScope(
+      this[async_id_symbol], internalConnect, this, path,
+    );
+  } else {
+    lookupAndConnect(this, options);
+  }
+  return this;
+};
+
/**
 * Swaps this socket's internal handle for `handle`, closing any previously
 * attached handle and re-running handle initialization.
 */
Socket.prototype[kReinitializeHandle] = function reinitializeHandle(handle) {
  const previous = this._handle;
  if (previous) {
    previous.close();
  }

  this._handle = handle;
  handle[owner_symbol] = this;

  initSocketHandle(this);
};
+
/**
 * Maps the string family names 'IPv4'/'IPv6' to the numeric DNS family
 * values 4/6; any other value is passed through unchanged.
 * @param {string|number|undefined} family
 * @returns {number|*} numeric family, or the input as-is.
 */
function socketToDnsFamily(family) {
  if (family === 'IPv4') return 4;
  if (family === 'IPv6') return 6;
  return family;
}
+
/**
 * Resolves options.host (defaulting to 'localhost') and starts the TCP
 * connection for `self`. IP literals skip DNS entirely; otherwise
 * dns.lookup (or a user-supplied options.lookup) is used. When the family
 * is not pinned, no localAddress is set, and autoSelectFamily is enabled,
 * the multi-address path (lookupAndConnectMultiple) is taken instead.
 */
function lookupAndConnect(self, options) {
  const { localAddress, localPort } = options;
  const host = options.host || 'localhost';
  let { port, autoSelectFamilyAttemptTimeout, autoSelectFamily } = options;

  validateString(host, 'options.host');

  if (localAddress && !isIP(localAddress)) {
    throw new ERR_INVALID_IP_ADDRESS(localAddress);
  }

  if (localPort) {
    validateNumber(localPort, 'options.localPort');
  }

  if (port !== undefined) {
    if (typeof port !== 'number' && typeof port !== 'string') {
      throw new ERR_INVALID_ARG_TYPE('options.port',
                                     ['number', 'string'], port);
    }
    validatePort(port);
  }
  // Coerce to an integer (validatePort accepts numeric strings too).
  port |= 0;


  // Fall back to the process-wide defaults when the options do not pin
  // the auto-select behavior/timeout explicitly.
  if (autoSelectFamily != null) {
    validateBoolean(autoSelectFamily, 'options.autoSelectFamily');
  } else {
    autoSelectFamily = autoSelectFamilyDefault;
  }

  if (autoSelectFamilyAttemptTimeout != null) {
    validateInt32(autoSelectFamilyAttemptTimeout, 'options.autoSelectFamilyAttemptTimeout', 1);

    // Clamp very small per-attempt timeouts to 10 ms.
    if (autoSelectFamilyAttemptTimeout < 10) {
      autoSelectFamilyAttemptTimeout = 10;
    }
  } else {
    autoSelectFamilyAttemptTimeout = autoSelectFamilyAttemptTimeoutDefault;
  }

  // If host is an IP, skip performing a lookup
  const addressType = isIP(host);
  if (addressType) {
    defaultTriggerAsyncIdScope(self[async_id_symbol], process.nextTick, () => {
      if (self.connecting)
        defaultTriggerAsyncIdScope(
          self[async_id_symbol],
          internalConnect,
          self, host, port, addressType, localAddress, localPort,
        );
    });
    return;
  }

  if (options.lookup != null)
    validateFunction(options.lookup, 'options.lookup');

  if (dns === undefined) dns = require('dns');
  const dnsopts = {
    family: socketToDnsFamily(options.family),
    hints: options.hints || 0,
  };

  // No explicit family or hints: restrict results to address families the
  // system is configured for. Skipped on Windows — presumably because of
  // platform-specific ADDRCONFIG lookup quirks; see the dns docs.
  if (!isWindows &&
      dnsopts.family !== 4 &&
      dnsopts.family !== 6 &&
      dnsopts.hints === 0) {
    dnsopts.hints = dns.ADDRCONFIG;
  }

  debug('connect: find host', host);
  debug('connect: dns options', dnsopts);
  self._host = host;
  const lookup = options.lookup || dns.lookup;

  if (dnsopts.family !== 4 && dnsopts.family !== 6 && !localAddress && autoSelectFamily) {
    debug('connect: autodetecting');

    dnsopts.all = true;
    defaultTriggerAsyncIdScope(self[async_id_symbol], function() {
      lookupAndConnectMultiple(
        self,
        async_id_symbol,
        lookup,
        host,
        options,
        dnsopts,
        port,
        localAddress,
        localPort,
        autoSelectFamilyAttemptTimeout,
      );
    });

    return;
  }

  defaultTriggerAsyncIdScope(self[async_id_symbol], function() {
    lookup(host, dnsopts, function emitLookup(err, ip, addressType) {
      self.emit('lookup', err, ip, addressType, host);

      // It's possible we were destroyed while looking this up.
      // XXX it would be great if we could cancel the promise returned by
      // the look up.
      if (!self.connecting) return;

      if (err) {
        // net.createConnection() creates a net.Socket object and immediately
        // calls net.Socket.connect() on it (that's us). There are no event
        // listeners registered yet so defer the error event to the next tick.
        process.nextTick(connectErrorNT, self, err);
      } else if ((typeof ip !== 'string') || !isIP(ip)) {
        err = new ERR_INVALID_IP_ADDRESS(ip);
        process.nextTick(connectErrorNT, self, err);
      } else if (addressType !== 4 && addressType !== 6) {
        err = new ERR_INVALID_ADDRESS_FAMILY(addressType,
                                             options.host,
                                             options.port);
        process.nextTick(connectErrorNT, self, err);
      } else {
        self._unrefTimer();
        defaultTriggerAsyncIdScope(
          self[async_id_symbol],
          internalConnect,
          self, ip, port, addressType, localAddress, localPort,
        );
      }
    });
  });
}
+
/**
 * Multi-address variant of lookupAndConnect() used when autoSelectFamily is
 * active: resolves all addresses for `host`, keeps the valid unique
 * IPv4/IPv6 ones, interleaves the two families (the family of the first
 * valid result gets preference), and hands the list to
 * internalConnectMultiple, which tries them one by one with `timeout` ms
 * per attempt. Falls back to a single internalConnect when only one
 * candidate remains.
 */
function lookupAndConnectMultiple(
  self, async_id_symbol, lookup, host, options, dnsopts, port, localAddress, localPort, timeout,
) {
  defaultTriggerAsyncIdScope(self[async_id_symbol], function emitLookup() {
    lookup(host, dnsopts, function emitLookup(err, addresses) {
      // It's possible we were destroyed while looking this up.
      // XXX it would be great if we could cancel the promise returned by
      // the look up.
      if (!self.connecting) {
        return;
      } else if (err) {
        self.emit('lookup', err, undefined, undefined, host);

        // net.createConnection() creates a net.Socket object and immediately
        // calls net.Socket.connect() on it (that's us). There are no event
        // listeners registered yet so defer the error event to the next tick.
        process.nextTick(connectErrorNT, self, err);
        return;
      }

      // Filter addresses by only keeping the one which are either IPv4 or IPV6.
      // The first valid address determines which group has preference on the
      // alternate family sorting which happens later.
      const validAddresses = [[], []];
      const validIps = [[], []];
      let destinations;
      for (let i = 0, l = addresses.length; i < l; i++) {
        const address = addresses[i];
        const { address: ip, family: addressType } = address;
        self.emit('lookup', err, ip, addressType, host);
        // It's possible we were destroyed while looking this up.
        if (!self.connecting) {
          return;
        }
        if (isIP(ip) && (addressType === 4 || addressType === 6)) {
          // The first valid family maps to slot 0, the other to slot 1.
          destinations ||= addressType === 6 ? { 6: 0, 4: 1 } : { 4: 0, 6: 1 };

          const destination = destinations[addressType];

          // Only try an address once
          if (!ArrayPrototypeIncludes(validIps[destination], ip)) {
            ArrayPrototypePush(validAddresses[destination], address);
            ArrayPrototypePush(validIps[destination], ip);
          }
        }
      }


      // When no AAAA or A records are available, fail on the first one
      if (!validAddresses[0].length && !validAddresses[1].length) {
        const { address: firstIp, family: firstAddressType } = addresses[0];

        if (!isIP(firstIp)) {
          err = new ERR_INVALID_IP_ADDRESS(firstIp);
          process.nextTick(connectErrorNT, self, err);
        } else if (firstAddressType !== 4 && firstAddressType !== 6) {
          err = new ERR_INVALID_ADDRESS_FAMILY(firstAddressType,
                                               options.host,
                                               options.port);
          process.nextTick(connectErrorNT, self, err);
        }

        return;
      }

      // Sort addresses alternating families
      const toAttempt = [];
      for (let i = 0, l = MathMax(validAddresses[0].length, validAddresses[1].length); i < l; i++) {
        if (i in validAddresses[0]) {
          ArrayPrototypePush(toAttempt, validAddresses[0][i]);
        }
        if (i in validAddresses[1]) {
          ArrayPrototypePush(toAttempt, validAddresses[1][i]);
        }
      }

      if (toAttempt.length === 1) {
        debug('connect/multiple: only one address found, switching back to single connection');
        const { address: ip, family: addressType } = toAttempt[0];

        self._unrefTimer();
        defaultTriggerAsyncIdScope(
          self[async_id_symbol],
          internalConnect,
          self,
          ip,
          port,
          addressType,
          localAddress,
          localPort,
        );

        return;
      }

      self.autoSelectFamilyAttemptedAddresses = [];
      debug('connect/multiple: will try the following addresses', toAttempt);

      // Shared state for the successive attempts; `errors` collects one
      // exception per failed attempt, `current` indexes the next address.
      const context = {
        socket: self,
        addresses: toAttempt,
        current: 0,
        port,
        localPort,
        timeout,
        [kTimeout]: null,
        errors: [],
      };

      self._unrefTimer();
      defaultTriggerAsyncIdScope(self[async_id_symbol], internalConnectMultiple, context);
    });
  });
}
+
/**
 * Next-tick helper: tears the socket down with the lookup/connect error so
 * 'error' is emitted after the caller had a chance to attach listeners.
 */
function connectErrorNT(self, err) {
  self.destroy(err);
}
+
+
/**
 * Refs the underlying handle so it keeps the event loop alive; when the
 * handle does not exist yet, the ref is re-attempted once 'connect' fires.
 * @returns {Socket} this
 */
Socket.prototype.ref = function() {
  const handle = this._handle;

  if (!handle) {
    this.once('connect', this.ref);
    return this;
  }

  if (typeof handle.ref === 'function') {
    handle.ref();
  }

  return this;
};
+
+
/**
 * Unrefs the underlying handle so it no longer keeps the event loop alive;
 * when the handle does not exist yet, the unref is re-attempted on
 * 'connect'.
 * @returns {Socket} this
 */
Socket.prototype.unref = function() {
  const handle = this._handle;

  if (!handle) {
    this.once('connect', this.unref);
    return this;
  }

  if (typeof handle.unref === 'function') {
    handle.unref();
  }

  return this;
};
+
+
/**
 * C++ -> JS completion callback for a single connection attempt.
 * On success (status === 0) finishes socket setup: half-open handling,
 * NODELAY/keep-alive flags that were recorded before the handle existed,
 * the 'connect' and 'ready' events, and the initial zero-length read.
 * On failure destroys the socket with an ExceptionWithHostPort carrying
 * the remote (and, when known, local) endpoint.
 */
function afterConnect(status, handle, req, readable, writable) {
  const self = handle[owner_symbol];

  // Callback may come after call to destroy
  if (self.destroyed) {
    return;
  }

  debug('afterConnect');

  assert(self.connecting);
  self.connecting = false;
  self._sockname = null;

  if (status === 0) {
    // Connection established in non-readable mode: signal EOF locally.
    if (self.readable && !readable) {
      self.push(null);
      self.read();
    }
    // Connection established in non-writable mode: end the writable side.
    if (self.writable && !writable) {
      self.end();
    }
    self._unrefTimer();

    // Apply options that were set before the handle existed.
    if (self[kSetNoDelay] && self._handle.setNoDelay) {
      self._handle.setNoDelay(true);
    }

    if (self[kSetKeepAlive] && self._handle.setKeepAlive) {
      self._handle.setKeepAlive(true, self[kSetKeepAliveInitialDelay]);
    }

    self.emit('connect');
    self.emit('ready');

    // Start the first read, or get an immediate EOF.
    // this doesn't actually consume any bytes, because len=0.
    if (readable && !self.isPaused())
      self.read(0);
    if (self[kPerfHooksNetConnectContext] && hasObserver('net')) {
      stopPerf(self, kPerfHooksNetConnectContext);
    }
  } else {
    let details;
    if (req.localAddress && req.localPort) {
      details = req.localAddress + ':' + req.localPort;
    }
    const ex = new ExceptionWithHostPort(status,
                                         'connect',
                                         req.address,
                                         req.port,
                                         details);
    if (details) {
      ex.localAddress = req.localAddress;
      ex.localPort = req.localPort;
    }

    self.emit('connectionAttemptFailed', req.address, req.port, req.addressType, ex);
    self.destroy(ex);
  }
}
+
/**
 * Wires an AbortSignal from connect() options to the socket: aborting the
 * signal flags the socket as aborted and disposes the listener. Both the
 * already-aborted and the not-yet-aborted paths defer work by one tick, so
 * the abort is observed asynchronously either way.
 */
function addClientAbortSignalOption(self, options) {
  validateAbortSignal(options.signal, 'options.signal');
  const { signal } = options;
  let listenerDisposable;

  const onAbort = () => {
    listenerDisposable?.[SymbolDispose]();
    self._aborted = true;
  };

  if (!signal.aborted) {
    process.nextTick(() => {
      listenerDisposable = addAbortListener(signal, onAbort);
    });
  } else {
    process.nextTick(onAbort);
  }
}
+
/**
 * Builds an ExceptionWithHostPort for a failed connection attempt,
 * attaching the local address/port details when both are known.
 */
function createConnectionError(req, status) {
  const { localAddress, localPort } = req;
  const details = (localAddress && localPort) ?
    localAddress + ':' + localPort :
    undefined;

  const ex = new ExceptionWithHostPort(status,
                                       'connect',
                                       req.address,
                                       req.port,
                                       details);
  if (details) {
    ex.localAddress = localAddress;
    ex.localPort = localPort;
  }

  return ex;
}
+
/**
 * Completion callback for one attempt of the autoSelectFamily connection
 * sequence. Ignores a success that arrives after its attempt already timed
 * out (a later attempt is the current one), records failures and moves on
 * to the next address, and on a live success defers to afterConnect().
 */
function afterConnectMultiple(context, current, status, handle, req, readable, writable) {
  debug('connect/multiple: connection attempt to %s:%s completed with status %s', req.address, req.port, status);

  // Make sure another connection is not spawned
  clearTimeout(context[kTimeout]);

  // One of the connection has completed and correctly dispatched but after timeout, ignore this one
  if (status === 0 && current !== context.current - 1) {
    debug('connect/multiple: ignoring successful but timedout connection to %s:%s', req.address, req.port);
    handle.close();
    return;
  }

  const self = context.socket;

  // Some error occurred, add to the list of exceptions
  if (status !== 0) {
    const ex = createConnectionError(req, status);
    ArrayPrototypePush(context.errors, ex);

    self.emit('connectionAttemptFailed', req.address, req.port, req.addressType, ex);

    // Try the next address, unless we were aborted
    if (context.socket.connecting) {
      internalConnectMultiple(context, status === UV_ECANCELED);
    }

    return;
  }

  if (hasObserver('net')) {
    startPerf(
      self,
      kPerfHooksNetConnectContext,
      { type: 'net', name: 'connect', detail: { host: req.address, port: req.port } },
    );
  }

  // Success: finish standard connection setup on the winning handle.
  afterConnect(status, self._handle, req, readable, writable);
}
+
/**
 * Fired when a single autoSelectFamily attempt exceeded context.timeout:
 * records an ETIMEDOUT error for the attempt, closes its handle, and starts
 * the next attempt (unless the socket stopped connecting meanwhile).
 */
function internalConnectMultipleTimeout(context, req, handle) {
  debug('connect/multiple: connection to %s:%s timed out', req.address, req.port);
  context.socket.emit('connectionAttemptTimeout', req.address, req.port, req.addressType);

  // Detach the completion callback first so closing the handle (or a late
  // success) cannot re-enter afterConnectMultiple for this attempt.
  req.oncomplete = undefined;
  ArrayPrototypePush(context.errors, createConnectionError(req, UV_ETIMEDOUT));
  handle.close();

  // Try the next address, unless we were aborted
  if (context.socket.connecting) {
    internalConnectMultiple(context);
  }
}
+
/**
 * Wires an optional AbortSignal from listen() options to the server:
 * aborting the signal closes the server. No-op when no signal was given.
 * The abort listener is disposed once the server closes.
 */
function addServerAbortSignalOption(self, options) {
  const signal = options?.signal;
  if (signal === undefined) {
    return;
  }
  validateAbortSignal(signal, 'options.signal');
  const onAborted = () => {
    self.close();
  };
  if (!signal.aborted) {
    const disposable = addAbortListener(signal, onAborted);
    self.once('close', disposable[SymbolDispose]);
  } else {
    // Already aborted: close asynchronously, mirroring the listener path.
    process.nextTick(onAborted);
  }
}
+
/**
 * net.Server constructor. Accepts (options[, connectionListener]) or just
 * (connectionListener); the listener, when given, is attached to the
 * 'connection' event. Option values are normalized here (negative delays
 * clamped, highWaterMark defaulted) and stored on the instance for use when
 * sockets are accepted in onconnection().
 * @throws {ERR_INVALID_ARG_TYPE} when options is neither function nor object.
 */
function Server(options, connectionListener) {
  if (!(this instanceof Server))
    return new Server(options, connectionListener);

  EventEmitter.call(this);

  if (typeof options === 'function') {
    connectionListener = options;
    options = kEmptyObject;
    this.on('connection', connectionListener);
  } else if (options == null || typeof options === 'object') {
    options = { ...options };

    if (typeof connectionListener === 'function') {
      this.on('connection', connectionListener);
    }
  } else {
    throw new ERR_INVALID_ARG_TYPE('options', 'Object', options);
  }
  if (options.keepAliveInitialDelay !== undefined) {
    validateNumber(
      options.keepAliveInitialDelay, 'options.keepAliveInitialDelay',
    );

    // Negative delays are clamped to 0.
    if (options.keepAliveInitialDelay < 0) {
      options.keepAliveInitialDelay = 0;
    }
  }
  if (options.highWaterMark !== undefined) {
    validateNumber(
      options.highWaterMark, 'options.highWaterMark',
    );

    // Negative values fall back to the stream default.
    if (options.highWaterMark < 0) {
      options.highWaterMark = getDefaultHighWaterMark();
    }
  }

  this._connections = 0;

  this[async_id_symbol] = -1;
  this._handle = null;
  this._usingWorkers = false;
  this._workers = [];
  this._unref = false;
  // Monotonic id used to invalidate stale listen()/lookup callbacks.
  this._listeningId = 1;

  this.allowHalfOpen = options.allowHalfOpen || false;
  this.pauseOnConnect = !!options.pauseOnConnect;
  this.noDelay = Boolean(options.noDelay);
  this.keepAlive = Boolean(options.keepAlive);
  // Presumably milliseconds in the option; stored as whole seconds
  // (~~ truncates) — matches how setKeepAlive consumes it. TODO confirm.
  this.keepAliveInitialDelay = ~~(options.keepAliveInitialDelay / 1000);
  this.highWaterMark = options.highWaterMark ?? getDefaultHighWaterMark();
  if (options.blockList) {
    if (!module.exports.BlockList.isBlockList(options.blockList)) {
      throw new ERR_INVALID_ARG_TYPE('options.blockList', 'net.BlockList', options.blockList);
    }
    this.blockList = options.blockList;
  }
}
// Server is an EventEmitter, both on instances and statically.
ObjectSetPrototypeOf(Server.prototype, EventEmitter.prototype);
ObjectSetPrototypeOf(Server, EventEmitter);
+
+
// Coerces x to a number; returns that number when it is >= 0, else `false`.
function toNumber(x) {
  const n = Number(x);
  return n >= 0 ? n : false;
}
+
// Returns handle if it can be created, or error code if it can't
/**
 * Creates and binds a server handle for the requested target:
 *  - fd >= 0: wraps and opens an existing file descriptor,
 *  - port === -1 && addressType === -1: a Pipe (UNIX socket / Windows pipe)
 *    server,
 *  - otherwise: a TCP server, bound to address:port when given (binding to
 *    the unspecified IPv6 address first and retrying as IPv4 when that
 *    fails).
 * Returns the handle on success or a negative libuv error code on failure;
 * the handle is closed before an error code is returned.
 */
function createServerHandle(address, port, addressType, fd, flags) {
  let err = 0;
  // Assign handle in listen, and clean up if bind or listen fails
  let handle;

  let isTCP = false;
  if (typeof fd === 'number' && fd >= 0) {
    try {
      handle = createHandle(fd, true);
    } catch (e) {
      // Not a fd we can listen on. This will trigger an error.
      debug('listen invalid fd=%d:', fd, e.message);
      return UV_EINVAL;
    }

    err = handle.open(fd);
    if (err)
      return err;

    assert(!address && !port);
  } else if (port === -1 && addressType === -1) {
    handle = new Pipe(PipeConstants.SERVER);
    if (isWindows) {
      // Allow tuning the number of pending pipe instances via env var.
      const instances = NumberParseInt(process.env.NODE_PENDING_PIPE_INSTANCES);
      if (!NumberIsNaN(instances)) {
        handle.setPendingInstances(instances);
      }
    }
  } else {
    handle = new TCP(TCPConstants.SERVER);
    isTCP = true;
  }

  if (address || port || isTCP) {
    debug('bind to', address || 'any');
    if (!address) {
      // Try binding to ipv6 first
      err = handle.bind6(DEFAULT_IPV6_ADDR, port, flags);
      if (err) {
        handle.close();
        // Fallback to ipv4
        return createServerHandle(DEFAULT_IPV4_ADDR, port, undefined, undefined, flags);
      }
    } else if (addressType === 6) {
      err = handle.bind6(address, port, flags);
    } else {
      err = handle.bind(address, port, flags);
    }
  }

  if (err) {
    handle.close();
    return err;
  }

  return handle;
}
+
/**
 * Final stage of Server#listen() (invoked with the server as `this`):
 * creates/binds a handle when one is not already attached (trying the
 * unspecified IPv6 address first and falling back to IPv4), starts
 * listening, and emits 'listening' — or 'error' — on the next tick.
 * Also publishes to the net.server.listen diagnostics channels.
 */
function setupListenHandle(address, port, addressType, backlog, fd, flags) {
  debug('setupListenHandle', address, port, addressType, backlog, fd);

  // If there is not yet a handle, we need to create one and bind.
  // In the case of a server sent via IPC, we don't need to do this.
  if (this._handle) {
    debug('setupListenHandle: have a handle already');
  } else {
    debug('setupListenHandle: create a handle');

    let rval = null;

    // Try to bind to the unspecified IPv6 address, see if IPv6 is available
    if (!address && typeof fd !== 'number') {
      rval = createServerHandle(DEFAULT_IPV6_ADDR, port, 6, fd, flags);

      // A numeric result is a libuv error code: retry as IPv4 below.
      if (typeof rval === 'number') {
        rval = null;
        address = DEFAULT_IPV4_ADDR;
        addressType = 4;
      } else {
        address = DEFAULT_IPV6_ADDR;
        addressType = 6;
      }
    }

    if (rval === null)
      rval = createServerHandle(address, port, addressType, fd, flags);

    if (typeof rval === 'number') {
      const error = new UVExceptionWithHostPort(rval, 'listen', address, port);

      if (netServerListen.hasSubscribers) {
        netServerListen.error.publish({ server: this, error });
      }

      process.nextTick(emitErrorNT, this, error);
      return;
    }
    this._handle = rval;
  }

  this[async_id_symbol] = getNewAsyncId(this._handle);
  this._handle.onconnection = onconnection;
  this._handle[owner_symbol] = this;

  // Use a backlog of 512 entries. We pass 511 to the listen() call because
  // the kernel does: backlogsize = roundup_pow_of_two(backlogsize + 1);
  // which will thus give us a backlog of 512 entries.
  const err = this._handle.listen(backlog || 511);

  if (err) {
    const ex = new UVExceptionWithHostPort(err, 'listen', address, port);
    this._handle.close();
    this._handle = null;

    if (netServerListen.hasSubscribers) {
      netServerListen.error.publish({ server: this, error: ex });
    }

    defaultTriggerAsyncIdScope(this[async_id_symbol],
                               process.nextTick,
                               emitErrorNT,
                               this,
                               ex);
    return;
  }

  if (netServerListen.hasSubscribers) {
    netServerListen.asyncEnd.publish({ server: this });
  }

  // Generate connection key, this should be unique to the connection
  this._connectionKey = addressType + ':' + address + ':' + port;

  // Unref the handle if the server was unref'ed prior to listening
  if (this._unref)
    this.unref();

  defaultTriggerAsyncIdScope(this[async_id_symbol],
                             process.nextTick,
                             emitListeningNT,
                             this);
}

Server.prototype._listen2 = setupListenHandle; // legacy alias
+
// Next-tick helper: re-emits `err` as an 'error' event on `target`.
function emitErrorNT(target, err) {
  target.emit('error', err);
}
+
+
// Next-tick helper: emits 'listening', unless the handle was closed in the
// meantime (e.g. by close() racing the deferred emit).
function emitListeningNT(self) {
  if (!self._handle) {
    return;
  }
  self.emit('listening');
}
+
+
/**
 * Listens either directly (cluster primary, or exclusive mode) or through
 * the cluster primary, which owns the real handle and shares it with the
 * workers. The captured `_listeningId` guards a close()/listen() race: a
 * reply from the primary for a superseded listen() call just closes the
 * received handle instead of using it.
 */
function listenInCluster(server, address, port, addressType,
                         backlog, fd, exclusive, flags, options) {
  exclusive = !!exclusive;

  if (cluster === undefined) cluster = require('cluster');

  if (cluster.isPrimary || exclusive) {
    // Will create a new handle
    // _listen2 sets up the listened handle, it is still named like this
    // to avoid breaking code that wraps this method
    server._listen2(address, port, addressType, backlog, fd, flags);
    return;
  }

  const serverQuery = {
    address: address,
    port: port,
    addressType: addressType,
    fd: fd,
    flags,
    backlog,
    ...options,
  };
  const listeningId = server._listeningId;
  // Get the primary's server handle, and listen on it
  cluster._getServer(server, serverQuery, listenOnPrimaryHandle);
  function listenOnPrimaryHandle(err, handle) {
    // A newer listen()/close() superseded this request: drop the handle.
    if (listeningId !== server._listeningId) {
      handle.close();
      return;
    }
    err = checkBindError(err, port, handle);

    if (err) {
      const ex = new ExceptionWithHostPort(err, 'bind', address, port);
      return server.emit('error', ex);
    }
    // If there was a handle, just close it to avoid fd leak
    // but it doesn't look like that's going to happen right now
    if (server._handle) {
      server._handle.close();
    }
    // Reuse primary's server handle
    server._handle = handle;
    // _listen2 sets up the listened handle, it is still named like this
    // to avoid breaking code that wraps this method
    server._listen2(address, port, addressType, backlog, fd, flags);
  }
}
+
+
/**
 * Starts the server listening for connections. Accepts the usual overloads
 * — (port[, host][, backlog][, cb]), (path[, backlog][, cb]),
 * (handle[, backlog][, cb]) or (options[, cb]) — normalized via
 * normalizeArgs(). Dispatches to listenInCluster()/lookupAndListen() based
 * on the normalized options and returns `this`.
 *
 * Fix over previous revision: the abstract-unix-socket error message said
 * "writableAllt", naming a nonexistent option; corrected to "writableAll".
 *
 * @returns {Server} this
 * @throws {ERR_SERVER_ALREADY_LISTEN} when already listening.
 * @throws {ERR_INVALID_ARG_VALUE} on unusable option combinations.
 */
Server.prototype.listen = function(...args) {
  const normalized = normalizeArgs(args);
  let options = normalized[0];
  const cb = normalized[1];

  if (this._handle) {
    throw new ERR_SERVER_ALREADY_LISTEN();
  }

  if (netServerListen.hasSubscribers) {
    netServerListen.asyncStart.publish({ server: this, options });
  }

  if (cb !== null) {
    this.once('listening', cb);
  }
  const backlogFromArgs =
    // (handle, backlog) or (path, backlog) or (port, backlog)
    toNumber(args.length > 1 && args[1]) ||
    toNumber(args.length > 2 && args[2]); // (port, host, backlog)

  options = options._handle || options.handle || options;
  const flags = getFlags(options);
  // Refresh the id to make the previous call invalid
  this._listeningId++;
  // (handle[, backlog][, cb]) where handle is an object with a handle
  if (options instanceof TCP) {
    this._handle = options;
    this[async_id_symbol] = this._handle.getAsyncId();
    listenInCluster(this, null, -1, -1, backlogFromArgs, undefined, true);
    return this;
  }
  addServerAbortSignalOption(this, options);
  // (handle[, backlog][, cb]) where handle is an object with a fd
  if (typeof options.fd === 'number' && options.fd >= 0) {
    listenInCluster(this, null, null, null, backlogFromArgs, options.fd);
    return this;
  }

  // ([port][, host][, backlog][, cb]) where port is omitted,
  // that is, listen(), listen(null), listen(cb), or listen(null, cb)
  // or (options[, cb]) where options.port is explicitly set as undefined or
  // null, bind to an arbitrary unused port
  if (args.length === 0 || typeof args[0] === 'function' ||
      (options.port === undefined && 'port' in options) ||
      options.port === null) {
    options.port = 0;
  }
  // ([port][, host][, backlog][, cb]) where port is specified
  // or (options[, cb]) where options.port is specified
  // or if options.port is normalized as 0 before
  let backlog;
  if (typeof options.port === 'number' || typeof options.port === 'string') {
    validatePort(options.port, 'options.port');
    backlog = options.backlog || backlogFromArgs;
    if (options.reusePort === true) {
      options.exclusive = true;
    }
    // start TCP server listening on host:port
    if (options.host) {
      lookupAndListen(this, options.port | 0, options.host, backlog,
                      options.exclusive, flags);
    } else { // Undefined host, listens on unspecified address
      // Default addressType 4 will be used to search for primary server
      listenInCluster(this, null, options.port | 0, 4,
                      backlog, undefined, options.exclusive, flags);
    }
    return this;
  }

  // (path[, backlog][, cb]) or (options[, cb])
  // where path or options.path is a UNIX domain socket or Windows pipe
  if (options.path && isPipeName(options.path)) {
    // We can not call fchmod on abstract unix socket
    if (options.path[0] === '\0' &&
        (options.readableAll || options.writableAll)) {
      const msg = 'can not set readableAll or writableAll to true when path is abstract unix socket';
      throw new ERR_INVALID_ARG_VALUE('options', options, msg);
    }
    const pipeName = this._pipeName = options.path;
    backlog = options.backlog || backlogFromArgs;
    listenInCluster(this,
                    pipeName,
                    -1,
                    -1,
                    backlog,
                    undefined,
                    options.exclusive,
                    undefined,
                    {
                      readableAll: options.readableAll,
                      writableAll: options.writableAll,
                    });

    if (!this._handle) {
      // Failed and an error shall be emitted in the next tick.
      // Therefore, we directly return.
      return this;
    }

    // Widen pipe permissions when requested (not possible for abstract
    // sockets, rejected above).
    let mode = 0;
    if (options.readableAll === true)
      mode |= PipeConstants.UV_READABLE;
    if (options.writableAll === true)
      mode |= PipeConstants.UV_WRITABLE;
    if (mode !== 0) {
      const err = this._handle.fchmod(mode);
      if (err) {
        this._handle.close();
        this._handle = null;
        throw new ErrnoException(err, 'uv_pipe_chmod');
      }
    }
    return this;
  }

  if (!(('port' in options) || ('path' in options))) {
    throw new ERR_INVALID_ARG_VALUE('options', options,
                                    'must have the property "port" or "path"');
  }

  throw new ERR_INVALID_ARG_VALUE('options', options);
};
+
/**
 * Tests whether `ip` is an IPv6 link-local address (fe80::/10).
 * Anything that is not a valid IPv6 address yields false.
 */
function isIpv6LinkLocal(ip) {
  if (!isIPv6(ip)) {
    return false;
  }

  // Link-local addresses carry the 10-bit prefix 1111 1110 10: the first
  // byte must be 0xfe and the top two bits of the second byte must be 10.
  const bytes = convertIpv6StringToBuffer(ip);
  return bytes[0] === 0xfe && (bytes[1] & 0xc0) === 0x80;
}
+
/**
 * Picks the address to use from a dns.lookup({ all: true }) result: the
 * first entry that is not an IPv6 link-local address, falling back to the
 * very first entry when every result is link-local.
 */
function filterOnlyValidAddress(addresses) {
  for (let i = 0; i < addresses.length; i++) {
    const candidate = addresses[i];
    if (!isIpv6LinkLocal(candidate.address)) {
      return candidate;
    }
  }

  return addresses[0];
}
+
/**
 * Resolves `address` before listening, preferring the first non-link-local
 * result, then proceeds via listenInCluster(). The captured `_listeningId`
 * discards DNS replies that arrive after a newer listen()/close() call.
 */
function lookupAndListen(self, port, address, backlog,
                         exclusive, flags) {
  if (dns === undefined) dns = require('dns');
  const listeningId = self._listeningId;

  dns.lookup(address, { all: true }, (err, addresses) => {
    // A newer listen()/close() superseded this lookup: ignore the result.
    if (listeningId !== self._listeningId) {
      return;
    }
    if (err) {
      self.emit('error', err);
    } else {
      // NOTE(review): an empty, error-free `addresses` array would make
      // validAddress undefined and throw on `.address` below — presumably
      // dns.lookup never resolves with an empty list; confirm.
      const validAddress = filterOnlyValidAddress(addresses);
      const family = validAddress?.family || 4;

      listenInCluster(self, validAddress.address, port, family,
                      backlog, undefined, exclusive, flags);
    }
  });
}
+
ObjectDefineProperty(Server.prototype, 'listening', {
  __proto__: null,
  // A server is "listening" exactly while it owns a live handle.
  get: function() {
    return !!this._handle;
  },
  configurable: true,
  enumerable: true,
});

/**
 * Returns the bound address: an object (as filled in by getsockname) for
 * socket servers, the pipe name string for pipe servers, or null before
 * listen() / after close().
 * @throws {ErrnoException} when getsockname fails.
 */
Server.prototype.address = function() {
  if (this._handle?.getsockname) {
    const out = {};
    const err = this._handle.getsockname(out);
    if (err) {
      throw new ErrnoException(err, 'address');
    }
    return out;
  } else if (this._pipeName) {
    return this._pipeName;
  }
  return null;
};
+
/**
 * C++ -> JS callback invoked for every incoming connection on the listen
 * handle (`this` is the handle; its owner is the Server). Enforces
 * maxConnections (emitting 'drop' with peer/local details when available)
 * and the optional blockList, then wraps the client handle in a net.Socket
 * configured from the server's options and emits 'connection'.
 */
function onconnection(err, clientHandle) {
  const handle = this;
  const self = handle[owner_symbol];

  debug('onconnection');

  if (err) {
    self.emit('error', new ErrnoException(err, 'accept'));
    return;
  }

  // Over the connection limit: report the dropped connection and close it.
  if (self.maxConnections != null && self._connections >= self.maxConnections) {
    if (clientHandle.getsockname || clientHandle.getpeername) {
      const data = { __proto__: null };
      if (clientHandle.getsockname) {
        const localInfo = { __proto__: null };
        clientHandle.getsockname(localInfo);
        data.localAddress = localInfo.address;
        data.localPort = localInfo.port;
        data.localFamily = localInfo.family;
      }
      if (clientHandle.getpeername) {
        const remoteInfo = { __proto__: null };
        clientHandle.getpeername(remoteInfo);
        data.remoteAddress = remoteInfo.address;
        data.remotePort = remoteInfo.port;
        data.remoteFamily = remoteInfo.family;
      }
      self.emit('drop', data);
    } else {
      self.emit('drop');
    }
    clientHandle.close();
    return;
  }
  // Silently reject peers matched by the configured BlockList.
  if (self.blockList && typeof clientHandle.getpeername === 'function') {
    const remoteInfo = { __proto__: null };
    clientHandle.getpeername(remoteInfo);
    const addressType = isIP(remoteInfo.address);
    if (addressType && self.blockList.check(remoteInfo.address, `ipv${addressType}`)) {
      clientHandle.close();
      return;
    }
  }
  const socket = new Socket({
    handle: clientHandle,
    allowHalfOpen: self.allowHalfOpen,
    pauseOnCreate: self.pauseOnConnect,
    readable: true,
    writable: true,
    readableHighWaterMark: self.highWaterMark,
    writableHighWaterMark: self.highWaterMark,
  });

  // Propagate the server's per-socket TCP options to the accepted handle.
  if (self.noDelay && clientHandle.setNoDelay) {
    socket[kSetNoDelay] = true;
    clientHandle.setNoDelay(true);
  }
  if (self.keepAlive && clientHandle.setKeepAlive) {
    socket[kSetKeepAlive] = true;
    socket[kSetKeepAliveInitialDelay] = self.keepAliveInitialDelay;
    clientHandle.setKeepAlive(true, self.keepAliveInitialDelay);
  }

  self._connections++;
  socket.server = self;
  socket._server = self;
  self.emit('connection', socket);
  if (netServerSocketChannel.hasSubscribers) {
    netServerSocketChannel.publish({
      socket,
    });
  }
}
+
/**
 * Asynchronously counts the concurrent connections on the server. Without
 * worker processes this is simply `this._connections`; otherwise every
 * tracked worker is polled and the counts are summed.
 * @param {Function} cb invoked as cb(err, count) on a later tick.
 * @returns {Server} this
 */
Server.prototype.getConnections = function(cb) {
  const self = this;

  const end = (err, connections) => {
    defaultTriggerAsyncIdScope(self[async_id_symbol],
                               process.nextTick,
                               cb,
                               err,
                               connections);
  };

  if (!this._usingWorkers) {
    end(null, this._connections);
    return this;
  }

  // Poll workers; `pending` reaching exactly 0 means every reply arrived.
  let pending = this._workers.length;
  let total = this._connections;

  function onCount(err, count) {
    if (err) {
      // Mark the poll as failed so late replies can never reach 0 and
      // call end() a second time.
      pending = -1;
      return end(err);
    }

    total += count;
    pending -= 1;
    if (pending === 0) {
      end(null, total);
    }
  }

  for (const worker of this._workers) {
    worker.getConnections(onCount);
  }

  return this;
};
+
+
/**
 * Stops the server from accepting new connections. The 'close' event (and
 * `cb`) fire only once every open connection has ended; calling close() on
 * a server that is not listening reports ERR_SERVER_NOT_RUNNING through
 * `cb`. Bumping `_listeningId` invalidates any in-flight listen()/lookup.
 * @param {Function} [cb]
 * @returns {Server} this
 */
Server.prototype.close = function(cb) {
  this._listeningId++;
  if (typeof cb === 'function') {
    if (!this._handle) {
      this.once('close', function close() {
        cb(new ERR_SERVER_NOT_RUNNING());
      });
    } else {
      this.once('close', cb);
    }
  }

  if (this._handle) {
    this._handle.close();
    this._handle = null;
  }

  if (this._usingWorkers) {
    let left = this._workers.length;
    const onWorkerClose = () => {
      if (--left !== 0) return;

      this._connections = 0;
      this._emitCloseIfDrained();
    };

    // Increment connections to be sure that, even if all sockets will be closed
    // during polling of workers, `close` event will be emitted only once.
    this._connections++;

    // Poll workers
    for (let n = 0; n < this._workers.length; n++)
      this._workers[n].close(onWorkerClose);
  } else {
    this._emitCloseIfDrained();
  }

  return this;
};
+
// Supports `await using server`: asynchronously closes the server when the
// disposal scope exits; resolves immediately when not listening.
Server.prototype[SymbolAsyncDispose] = async function() {
  if (this._handle) {
    await FunctionPrototypeCall(promisify(this.close), this);
  }
};
+
// Emits 'close' (on a later tick) once the server holds neither a live
// handle nor any open connections.
Server.prototype._emitCloseIfDrained = function() {
  debug('SERVER _emitCloseIfDrained');

  const stillBusy = this._handle || this._connections;
  if (stillBusy) {
    debug('SERVER handle? %j connections? %d',
          !!this._handle, this._connections);
    return;
  }

  defaultTriggerAsyncIdScope(this[async_id_symbol],
                             process.nextTick,
                             emitCloseNT,
                             this);
};
+
+
// Next-tick helper: emits the final 'close' event on the server.
function emitCloseNT(server) {
  debug('SERVER: emit close');
  server.emit('close');
}
+
+
// Routes promise rejections thrown by 'connection' listeners into
// destroying the offending socket; rejections from any other event fall
// back to the server's 'error' event.
Server.prototype[EventEmitter.captureRejectionSymbol] = function(
  err, event, sock) {

  if (event === 'connection') {
    sock.destroy(err);
  } else {
    this.emit('error', err);
  }
};
+
+
// Legacy alias on the C++ wrapper object. This is not public API, so we may
// want to runtime-deprecate it at some point. There's no hurry, though.
ObjectDefineProperty(TCP.prototype, 'owner', {
  __proto__: null,
  get() { return this[owner_symbol]; },
  set(v) { return this[owner_symbol] = v; },
});

// Back `_handle` by the kHandle symbol so internal code can use the symbol
// while existing code keeps reading/writing `socket._handle`.
ObjectDefineProperty(Socket.prototype, '_handle', {
  __proto__: null,
  get() { return this[kHandle]; },
  set(v) { return this[kHandle] = v; },
});
+
/**
 * Registers a cluster worker's socket list with this server and switches
 * connection accounting to worker mode; the list is dropped again when the
 * worker exits.
 */
Server.prototype._setupWorker = function(socketList) {
  this._usingWorkers = true;
  // Use the primordial push for consistency with the rest of this file
  // (plain Array.prototype methods are tamper-prone in core).
  ArrayPrototypePush(this._workers, socketList);
  socketList.once('exit', (socketList) => {
    const index = ArrayPrototypeIndexOf(this._workers, socketList);
    this._workers.splice(index, 1);
  });
};
+
// Refs the listening handle (keeps the event loop alive) and records the
// state so a handle created by a later listen() is ref'ed as well.
Server.prototype.ref = function() {
  this._unref = false;
  this._handle?.ref();
  return this;
};
+
// Unrefs the listening handle (stops it from keeping the event loop alive)
// and records the state so a handle created by a later listen() is unref'ed.
Server.prototype.unref = function() {
  this._unref = true;
  this._handle?.unref();
  return this;
};
+
// Public API of the net module. BlockList and SocketAddress are resolved
// lazily on first access so their modules are not loaded until needed.
module.exports = {
  _createServerHandle: createServerHandle,
  _normalizeArgs: normalizeArgs,
  get BlockList() {
    BlockList ??= require('internal/blocklist').BlockList;
    return BlockList;
  },
  get SocketAddress() {
    SocketAddress ??= require('internal/socketaddress').SocketAddress;
    return SocketAddress;
  },
  connect,
  createConnection: connect,
  createServer,
  isIP: isIP,
  isIPv4: isIPv4,
  isIPv6: isIPv6,
  Server,
  Socket,
  Stream: Socket, // Legacy naming
  getDefaultAutoSelectFamily,
  setDefaultAutoSelectFamily,
  getDefaultAutoSelectFamilyAttemptTimeout,
  setDefaultAutoSelectFamilyAttemptTimeout,
};
\ No newline at end of file
diff --git a/.codesandbox/node/node_sea.cc b/.codesandbox/node/node_sea.cc
new file mode 100644
index 0000000..a1184d4
--- /dev/null
+++ b/.codesandbox/node/node_sea.cc
@@ -0,0 +1,895 @@
+#include "node_sea.h"
+
+#include "blob_serializer_deserializer-inl.h"
+#include "debug_utils-inl.h"
+#include "env-inl.h"
+#include "node_contextify.h"
+#include "node_errors.h"
+#include "node_external_reference.h"
+#include "node_internals.h"
+#include "node_options.h"
+#include "node_snapshot_builder.h"
+#include "node_union_bytes.h"
+#include "node_v8_platform-inl.h"
+#include "simdjson.h"
+#include "util-inl.h"
+
+// The POSTJECT_SENTINEL_FUSE macro is a string of random characters selected by
+// the Node.js project that is present only once in the entire binary. It is
+// used by the postject_has_resource() function to efficiently detect if a
+// resource has been injected. See
+// https://github.com/nodejs/postject/blob/35343439cac8c488f2596d7c4c1dddfec1fddcae/postject-api.h#L42-L45.
+#define POSTJECT_SENTINEL_FUSE "NODE_SEA_FUSE_fce680ab2cc467b6e072b8b5df1996b2"
+#include "postject-api.h"
+#undef POSTJECT_SENTINEL_FUSE
+
+#include
+#include
+#include
+#include
+
+using node::ExitCode;
+using v8::Array;
+using v8::ArrayBuffer;
+using v8::BackingStore;
+using v8::Context;
+using v8::Function;
+using v8::FunctionCallbackInfo;
+using v8::HandleScope;
+using v8::Isolate;
+using v8::Local;
+using v8::LocalVector;
+using v8::MaybeLocal;
+using v8::NewStringType;
+using v8::Object;
+using v8::ScriptCompiler;
+using v8::ScriptOrigin;
+using v8::String;
+using v8::Value;
+
+namespace node {
+namespace sea {
+
+namespace {
+
+SeaFlags operator|(SeaFlags x, SeaFlags y) {
+ return static_cast(static_cast(x) |
+ static_cast(y));
+}
+
+SeaFlags operator&(SeaFlags x, SeaFlags y) {
+ return static_cast(static_cast(x) &
+ static_cast(y));
+}
+
+SeaFlags operator|=(/* NOLINT (runtime/references) */ SeaFlags& x, SeaFlags y) {
+ return x = x | y;
+}
+
+class SeaSerializer : public BlobSerializer {
+ public:
+ SeaSerializer()
+ : BlobSerializer(
+ per_process::enabled_debug_list.enabled(DebugCategory::SEA)) {}
+
+ template ::value>* = nullptr,
+ std::enable_if_t::value>* = nullptr>
+ size_t Write(const T& data);
+};
+
+template <>
+size_t SeaSerializer::Write(const SeaResource& sea) {
+ sink.reserve(SeaResource::kHeaderSize + sea.main_code_or_snapshot.size());
+
+ Debug("Write SEA magic %x\n", kMagic);
+ size_t written_total = WriteArithmetic(kMagic);
+
+ uint32_t flags = static_cast(sea.flags);
+ Debug("Write SEA flags %x\n", flags);
+ written_total += WriteArithmetic(flags);
+
+ Debug("Write SEA resource exec argv extension %u\n",
+ static_cast(sea.exec_argv_extension));
+ written_total +=
+ WriteArithmetic(static_cast(sea.exec_argv_extension));
+ DCHECK_EQ(written_total, SeaResource::kHeaderSize);
+
+ Debug("Write SEA code path %p, size=%zu\n",
+ sea.code_path.data(),
+ sea.code_path.size());
+ written_total +=
+ WriteStringView(sea.code_path, StringLogMode::kAddressAndContent);
+
+ Debug("Write SEA resource %s %p, size=%zu\n",
+ sea.use_snapshot() ? "snapshot" : "code",
+ sea.main_code_or_snapshot.data(),
+ sea.main_code_or_snapshot.size());
+ written_total +=
+ WriteStringView(sea.main_code_or_snapshot,
+ sea.use_snapshot() ? StringLogMode::kAddressOnly
+ : StringLogMode::kAddressAndContent);
+
+ if (sea.code_cache.has_value()) {
+ Debug("Write SEA resource code cache %p, size=%zu\n",
+ sea.code_cache->data(),
+ sea.code_cache->size());
+ written_total +=
+ WriteStringView(sea.code_cache.value(), StringLogMode::kAddressOnly);
+ }
+
+ if (!sea.assets.empty()) {
+ Debug("Write SEA resource assets size %zu\n", sea.assets.size());
+ written_total += WriteArithmetic(sea.assets.size());
+ for (auto const& [key, content] : sea.assets) {
+ Debug("Write SEA resource asset %s at %p, size=%zu\n",
+ key,
+ content.data(),
+ content.size());
+ written_total += WriteStringView(key, StringLogMode::kAddressAndContent);
+ written_total += WriteStringView(content, StringLogMode::kAddressOnly);
+ }
+ }
+
+ if (static_cast(sea.flags & SeaFlags::kIncludeExecArgv)) {
+ Debug("Write SEA resource exec argv size %zu\n", sea.exec_argv.size());
+ written_total += WriteArithmetic(sea.exec_argv.size());
+ for (const auto& arg : sea.exec_argv) {
+ Debug("Write SEA resource exec arg %s at %p, size=%zu\n",
+ arg.data(),
+ arg.data(),
+ arg.size());
+ written_total += WriteStringView(arg, StringLogMode::kAddressAndContent);
+ }
+ }
+ return written_total;
+}
+
+class SeaDeserializer : public BlobDeserializer {
+ public:
+ explicit SeaDeserializer(std::string_view v)
+ : BlobDeserializer(
+ per_process::enabled_debug_list.enabled(DebugCategory::SEA), v) {}
+
+ template ::value>* = nullptr,
+ std::enable_if_t::value>* = nullptr>
+ T Read();
+};
+
+template <>
+SeaResource SeaDeserializer::Read() {
+ uint32_t magic = ReadArithmetic();
+ Debug("Read SEA magic %x\n", magic);
+
+ CHECK_EQ(magic, kMagic);
+ SeaFlags flags(static_cast(ReadArithmetic()));
+ Debug("Read SEA flags %x\n", static_cast(flags));
+
+ uint8_t extension_value = ReadArithmetic();
+ SeaExecArgvExtension exec_argv_extension =
+ static_cast(extension_value);
+ Debug("Read SEA resource exec argv extension %u\n", extension_value);
+ CHECK_EQ(read_total, SeaResource::kHeaderSize);
+
+ std::string_view code_path =
+ ReadStringView(StringLogMode::kAddressAndContent);
+ Debug(
+ "Read SEA code path %p, size=%zu\n", code_path.data(), code_path.size());
+
+ bool use_snapshot = static_cast(flags & SeaFlags::kUseSnapshot);
+ std::string_view code =
+ ReadStringView(use_snapshot ? StringLogMode::kAddressOnly
+ : StringLogMode::kAddressAndContent);
+
+ Debug("Read SEA resource %s %p, size=%zu\n",
+ use_snapshot ? "snapshot" : "code",
+ code.data(),
+ code.size());
+
+ std::string_view code_cache;
+ if (static_cast(flags & SeaFlags::kUseCodeCache)) {
+ code_cache = ReadStringView(StringLogMode::kAddressOnly);
+ Debug("Read SEA resource code cache %p, size=%zu\n",
+ code_cache.data(),
+ code_cache.size());
+ }
+
+ std::unordered_map assets;
+ if (static_cast(flags & SeaFlags::kIncludeAssets)) {
+ size_t assets_size = ReadArithmetic();
+ Debug("Read SEA resource assets size %zu\n", assets_size);
+ for (size_t i = 0; i < assets_size; ++i) {
+ std::string_view key = ReadStringView(StringLogMode::kAddressAndContent);
+ std::string_view content = ReadStringView(StringLogMode::kAddressOnly);
+ Debug("Read SEA resource asset %s at %p, size=%zu\n",
+ key,
+ content.data(),
+ content.size());
+ assets.emplace(key, content);
+ }
+ }
+
+ std::vector exec_argv;
+ if (static_cast(flags & SeaFlags::kIncludeExecArgv)) {
+ size_t exec_argv_size = ReadArithmetic();
+ Debug("Read SEA resource exec args size %zu\n", exec_argv_size);
+ exec_argv.reserve(exec_argv_size);
+ for (size_t i = 0; i < exec_argv_size; ++i) {
+ std::string_view arg = ReadStringView(StringLogMode::kAddressAndContent);
+ Debug("Read SEA resource exec arg %s at %p, size=%zu\n",
+ arg.data(),
+ arg.data(),
+ arg.size());
+ exec_argv.emplace_back(arg);
+ }
+ }
+ return {flags,
+ exec_argv_extension,
+ code_path,
+ code,
+ code_cache,
+ assets,
+ exec_argv};
+}
+
+std::string_view FindSingleExecutableBlob() {
+#if !defined(DISABLE_SINGLE_EXECUTABLE_APPLICATION)
+ CHECK(IsSingleExecutable());
+ static const std::string_view result = []() -> std::string_view {
+ size_t size;
+#ifdef __APPLE__
+ postject_options options;
+ postject_options_init(&options);
+ options.macho_segment_name = "NODE_SEA";
+ const char* blob = static_cast(
+ postject_find_resource("NODE_SEA_BLOB", &size, &options));
+#else
+ const char* blob = static_cast(
+ postject_find_resource("NODE_SEA_BLOB", &size, nullptr));
+#endif
+ return {blob, size};
+ }();
+ per_process::Debug(DebugCategory::SEA,
+ "Found SEA blob %p, size=%zu\n",
+ result.data(),
+ result.size());
+ return result;
+#else
+ UNREACHABLE();
+#endif // !defined(DISABLE_SINGLE_EXECUTABLE_APPLICATION)
+}
+
+} // anonymous namespace
+
+bool SeaResource::use_snapshot() const {
+ return static_cast(flags & SeaFlags::kUseSnapshot);
+}
+
+bool SeaResource::use_code_cache() const {
+ return static_cast(flags & SeaFlags::kUseCodeCache);
+}
+
+SeaResource FindSingleExecutableResource() {
+ static const SeaResource sea_resource = []() -> SeaResource {
+ std::string_view blob = FindSingleExecutableBlob();
+ per_process::Debug(DebugCategory::SEA,
+ "Found SEA resource %p, size=%zu\n",
+ blob.data(),
+ blob.size());
+ SeaDeserializer deserializer(blob);
+ return deserializer.Read();
+ }();
+ return sea_resource;
+}
+
// True when a SEA resource has been injected into this binary, as detected
// by postject via the sentinel fuse string defined at the top of this file.
bool IsSingleExecutable() {
  return postject_has_resource();
}
+
+void IsSea(const FunctionCallbackInfo& args) {
+ args.GetReturnValue().Set(IsSingleExecutable());
+}
+
+void IsExperimentalSeaWarningNeeded(const FunctionCallbackInfo& args) {
+ bool is_building_sea =
+ !per_process::cli_options->experimental_sea_config.empty();
+ if (is_building_sea) {
+ args.GetReturnValue().Set(true);
+ return;
+ }
+
+ if (!IsSingleExecutable()) {
+ args.GetReturnValue().Set(false);
+ return;
+ }
+
+ SeaResource sea_resource = FindSingleExecutableResource();
+ args.GetReturnValue().Set(!static_cast(
+ sea_resource.flags & SeaFlags::kDisableExperimentalSeaWarning));
+}
+
+std::tuple FixupArgsForSEA(int argc, char** argv) {
+ // Repeats argv[0] at position 1 on argv as a replacement for the missing
+ // entry point file path.
+ if (IsSingleExecutable()) {
+ static std::vector new_argv;
+ static std::vector exec_argv_storage;
+ static std::vector cli_extension_args;
+
+ SeaResource sea_resource = FindSingleExecutableResource();
+
+ new_argv.clear();
+ exec_argv_storage.clear();
+ cli_extension_args.clear();
+
+ // Handle CLI extension mode for --node-options
+ if (sea_resource.exec_argv_extension == SeaExecArgvExtension::kCli) {
+ // Extract --node-options and filter argv
+ for (int i = 1; i < argc; ++i) {
+ if (strncmp(argv[i], "--node-options=", 15) == 0) {
+ std::string node_options = argv[i] + 15;
+ std::vector errors;
+ cli_extension_args = ParseNodeOptionsEnvVar(node_options, &errors);
+ // Remove this argument by shifting the rest
+ for (int j = i; j < argc - 1; ++j) {
+ argv[j] = argv[j + 1];
+ }
+ argc--;
+ i--; // Adjust index since we removed an element
+ }
+ }
+ }
+
+ // Reserve space for argv[0], exec argv, cli extension args, original argv,
+ // and nullptr
+ new_argv.reserve(argc + sea_resource.exec_argv.size() +
+ cli_extension_args.size() + 2);
+ new_argv.emplace_back(argv[0]);
+
+ // Insert exec argv from SEA config
+ if (!sea_resource.exec_argv.empty()) {
+ exec_argv_storage.reserve(sea_resource.exec_argv.size() +
+ cli_extension_args.size());
+ for (const auto& arg : sea_resource.exec_argv) {
+ exec_argv_storage.emplace_back(arg);
+ new_argv.emplace_back(exec_argv_storage.back().data());
+ }
+ }
+
+ // Insert CLI extension args
+ for (const auto& arg : cli_extension_args) {
+ exec_argv_storage.emplace_back(arg);
+ new_argv.emplace_back(exec_argv_storage.back().data());
+ }
+
+ // Add actual run time arguments
+ new_argv.insert(new_argv.end(), argv, argv + argc);
+ new_argv.emplace_back(nullptr);
+ argc = new_argv.size() - 1;
+ argv = new_argv.data();
+ }
+
+ return {argc, argv};
+}
+
+namespace {
+
+struct SeaConfig {
+ std::string main_path;
+ std::string output_path;
+ SeaFlags flags = SeaFlags::kDefault;
+ SeaExecArgvExtension exec_argv_extension = SeaExecArgvExtension::kEnv;
+ std::unordered_map assets;
+ std::vector exec_argv;
+};
+
+std::optional ParseSingleExecutableConfig(
+ const std::string& config_path) {
+ std::string config;
+ int r = ReadFileSync(&config, config_path.c_str());
+ if (r != 0) {
+ const char* err = uv_strerror(r);
+ FPrintF(stderr,
+ "Cannot read single executable configuration from %s: %s\n",
+ config_path,
+ err);
+ return std::nullopt;
+ }
+
+ SeaConfig result;
+
+ simdjson::ondemand::parser parser;
+ simdjson::ondemand::document document;
+ simdjson::ondemand::object main_object;
+ simdjson::error_code error =
+ parser.iterate(simdjson::pad(config)).get(document);
+
+ if (!error) {
+ error = document.get_object().get(main_object);
+ }
+ if (error) {
+ FPrintF(stderr,
+ "Cannot parse JSON from %s: %s\n",
+ config_path,
+ simdjson::error_message(error));
+ return std::nullopt;
+ }
+
+ bool use_snapshot_value = false;
+ bool use_code_cache_value = false;
+
+ for (auto field : main_object) {
+ std::string_view key;
+ if (field.unescaped_key().get(key)) {
+ FPrintF(stderr, "Cannot read key from %s\n", config_path);
+ return std::nullopt;
+ }
+ if (key == "main") {
+ if (field.value().get_string().get(result.main_path) ||
+ result.main_path.empty()) {
+ FPrintF(stderr,
+ "\"main\" field of %s is not a non-empty string\n",
+ config_path);
+ return std::nullopt;
+ }
+ } else if (key == "output") {
+ if (field.value().get_string().get(result.output_path) ||
+ result.output_path.empty()) {
+ FPrintF(stderr,
+ "\"output\" field of %s is not a non-empty string\n",
+ config_path);
+ return std::nullopt;
+ }
+ } else if (key == "disableExperimentalSEAWarning") {
+ bool disable_experimental_sea_warning;
+ if (field.value().get_bool().get(disable_experimental_sea_warning)) {
+ FPrintF(
+ stderr,
+ "\"disableExperimentalSEAWarning\" field of %s is not a Boolean\n",
+ config_path);
+ return std::nullopt;
+ }
+ if (disable_experimental_sea_warning) {
+ result.flags |= SeaFlags::kDisableExperimentalSeaWarning;
+ }
+ } else if (key == "useSnapshot") {
+ if (field.value().get_bool().get(use_snapshot_value)) {
+ FPrintF(stderr,
+ "\"useSnapshot\" field of %s is not a Boolean\n",
+ config_path);
+ return std::nullopt;
+ }
+ if (use_snapshot_value) {
+ result.flags |= SeaFlags::kUseSnapshot;
+ }
+ } else if (key == "useCodeCache") {
+ if (field.value().get_bool().get(use_code_cache_value)) {
+ FPrintF(stderr,
+ "\"useCodeCache\" field of %s is not a Boolean\n",
+ config_path);
+ return std::nullopt;
+ }
+ if (use_code_cache_value) {
+ result.flags |= SeaFlags::kUseCodeCache;
+ }
+ } else if (key == "assets") {
+ simdjson::ondemand::object assets_object;
+ if (field.value().get_object().get(assets_object)) {
+ FPrintF(stderr,
+ "\"assets\" field of %s is not a map of strings\n",
+ config_path);
+ return std::nullopt;
+ }
+ simdjson::ondemand::value asset_value;
+ for (auto asset_field : assets_object) {
+ std::string_view key_str;
+ std::string_view value_str;
+ if (asset_field.unescaped_key().get(key_str) ||
+ asset_field.value().get(asset_value) ||
+ asset_value.get_string().get(value_str)) {
+ FPrintF(stderr,
+ "\"assets\" field of %s is not a map of strings\n",
+ config_path);
+ return std::nullopt;
+ }
+
+ result.assets.emplace(key_str, value_str);
+ }
+
+ if (!result.assets.empty()) {
+ result.flags |= SeaFlags::kIncludeAssets;
+ }
+ } else if (key == "execArgv") {
+ simdjson::ondemand::array exec_argv_array;
+ if (field.value().get_array().get(exec_argv_array)) {
+ FPrintF(stderr,
+ "\"execArgv\" field of %s is not an array of strings\n",
+ config_path);
+ return std::nullopt;
+ }
+ std::vector exec_argv;
+ for (auto argv : exec_argv_array) {
+ std::string_view argv_str;
+ if (argv.get_string().get(argv_str)) {
+ FPrintF(stderr,
+ "\"execArgv\" field of %s is not an array of strings\n",
+ config_path);
+ return std::nullopt;
+ }
+ exec_argv.emplace_back(argv_str);
+ }
+ if (!exec_argv.empty()) {
+ result.flags |= SeaFlags::kIncludeExecArgv;
+ result.exec_argv = std::move(exec_argv);
+ }
+ } else if (key == "execArgvExtension") {
+ std::string_view extension_str;
+ if (field.value().get_string().get(extension_str)) {
+ FPrintF(stderr,
+ "\"execArgvExtension\" field of %s is not a string\n",
+ config_path);
+ return std::nullopt;
+ }
+ if (extension_str == "none") {
+ result.exec_argv_extension = SeaExecArgvExtension::kNone;
+ } else if (extension_str == "env") {
+ result.exec_argv_extension = SeaExecArgvExtension::kEnv;
+ } else if (extension_str == "cli") {
+ result.exec_argv_extension = SeaExecArgvExtension::kCli;
+ } else {
+ FPrintF(stderr,
+ "\"execArgvExtension\" field of %s must be one of "
+ "\"none\", \"env\", or \"cli\"\n",
+ config_path);
+ return std::nullopt;
+ }
+ }
+ }
+
+ if (static_cast(result.flags & SeaFlags::kUseSnapshot) &&
+ static_cast(result.flags & SeaFlags::kUseCodeCache)) {
+ // TODO(joyeecheung): code cache in snapshot should be configured by
+ // separate snapshot configurations.
+ FPrintF(stderr,
+ "\"useCodeCache\" is redundant when \"useSnapshot\" is true\n");
+ }
+
+ if (result.main_path.empty()) {
+ FPrintF(stderr,
+ "\"main\" field of %s is not a non-empty string\n",
+ config_path);
+ return std::nullopt;
+ }
+
+ if (result.output_path.empty()) {
+ FPrintF(stderr,
+ "\"output\" field of %s is not a non-empty string\n",
+ config_path);
+ return std::nullopt;
+ }
+
+ return result;
+}
+
+ExitCode GenerateSnapshotForSEA(const SeaConfig& config,
+ const std::vector& args,
+ const std::vector& exec_args,
+ const std::string& builder_script_content,
+ const SnapshotConfig& snapshot_config,
+ std::vector* snapshot_blob) {
+ SnapshotData snapshot;
+ // TODO(joyeecheung): make the arguments configurable through the JSON
+ // config or a programmatic API.
+ std::vector patched_args = {args[0], config.main_path};
+ ExitCode exit_code = SnapshotBuilder::Generate(&snapshot,
+ patched_args,
+ exec_args,
+ builder_script_content,
+ snapshot_config);
+ if (exit_code != ExitCode::kNoFailure) {
+ return exit_code;
+ }
+ auto& persistents = snapshot.env_info.principal_realm.persistent_values;
+ auto it = std::ranges::find_if(persistents, [](const PropInfo& prop) {
+ return prop.name == "snapshot_deserialize_main";
+ });
+ if (it == persistents.end()) {
+ FPrintF(
+ stderr,
+ "%s does not invoke "
+ "v8.startupSnapshot.setDeserializeMainFunction(), which is required "
+ "for snapshot scripts used to build single executable applications."
+ "\n",
+ config.main_path);
+ return ExitCode::kGenericUserError;
+ }
+ // We need the temporary variable for copy elision.
+ std::vector temp = snapshot.ToBlob();
+ *snapshot_blob = std::move(temp);
+ return ExitCode::kNoFailure;
+}
+
+std::optional GenerateCodeCache(std::string_view main_path,
+ std::string_view main_script) {
+ RAIIIsolate raii_isolate(SnapshotBuilder::GetEmbeddedSnapshotData());
+ Isolate* isolate = raii_isolate.get();
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ HandleScope handle_scope(isolate);
+
+ Local context = Context::New(isolate);
+ Context::Scope context_scope(context);
+
+ errors::PrinterTryCatch bootstrapCatch(
+ isolate, errors::PrinterTryCatch::kPrintSourceLine);
+
+ Local filename;
+ if (!String::NewFromUtf8(isolate,
+ main_path.data(),
+ NewStringType::kNormal,
+ main_path.length())
+ .ToLocal(&filename)) {
+ return std::nullopt;
+ }
+
+ Local content;
+ if (!String::NewFromUtf8(isolate,
+ main_script.data(),
+ NewStringType::kNormal,
+ main_script.length())
+ .ToLocal(&content)) {
+ return std::nullopt;
+ }
+
+ LocalVector parameters(
+ isolate,
+ {
+ FIXED_ONE_BYTE_STRING(isolate, "exports"),
+ FIXED_ONE_BYTE_STRING(isolate, "require"),
+ FIXED_ONE_BYTE_STRING(isolate, "module"),
+ FIXED_ONE_BYTE_STRING(isolate, "__filename"),
+ FIXED_ONE_BYTE_STRING(isolate, "__dirname"),
+ });
+ ScriptOrigin script_origin(filename, 0, 0, true);
+ ScriptCompiler::Source script_source(content, script_origin);
+ MaybeLocal maybe_fn =
+ ScriptCompiler::CompileFunction(context,
+ &script_source,
+ parameters.size(),
+ parameters.data(),
+ 0,
+ nullptr);
+ Local fn;
+ if (!maybe_fn.ToLocal(&fn)) {
+ return std::nullopt;
+ }
+
+ // TODO(RaisinTen): Using the V8 code cache prevents us from using `import()`
+ // in the SEA code. Support it.
+ // Refs: https://github.com/nodejs/node/pull/48191#discussion_r1213271430
+ std::unique_ptr cache{
+ ScriptCompiler::CreateCodeCacheForFunction(fn)};
+ std::string code_cache(cache->data, cache->data + cache->length);
+ return code_cache;
+}
+
+int BuildAssets(const std::unordered_map& config,
+ std::unordered_map* assets) {
+ for (auto const& [key, path] : config) {
+ std::string blob;
+ int r = ReadFileSync(&blob, path.c_str());
+ if (r != 0) {
+ const char* err = uv_strerror(r);
+ FPrintF(stderr, "Cannot read asset %s: %s\n", path.c_str(), err);
+ return r;
+ }
+ assets->emplace(key, std::move(blob));
+ }
+ return 0;
+}
+
+ExitCode GenerateSingleExecutableBlob(
+ const SeaConfig& config,
+ const std::vector& args,
+ const std::vector& exec_args) {
+ std::string main_script;
+ // TODO(joyeecheung): unify the file utils.
+ int r = ReadFileSync(&main_script, config.main_path.c_str());
+ if (r != 0) {
+ const char* err = uv_strerror(r);
+ FPrintF(stderr, "Cannot read main script %s:%s\n", config.main_path, err);
+ return ExitCode::kGenericUserError;
+ }
+
+ std::vector snapshot_blob;
+ bool builds_snapshot_from_main =
+ static_cast(config.flags & SeaFlags::kUseSnapshot);
+ if (builds_snapshot_from_main) {
+ // TODO(joyeecheung): allow passing snapshot configuration in SEA configs.
+ SnapshotConfig snapshot_config;
+ snapshot_config.builder_script_path = main_script;
+ ExitCode exit_code = GenerateSnapshotForSEA(
+ config, args, exec_args, main_script, snapshot_config, &snapshot_blob);
+ if (exit_code != ExitCode::kNoFailure) {
+ return exit_code;
+ }
+ }
+
+ std::optional optional_sv_code_cache;
+ std::string code_cache;
+ if (static_cast(config.flags & SeaFlags::kUseCodeCache)) {
+ std::optional optional_code_cache =
+ GenerateCodeCache(config.main_path, main_script);
+ if (!optional_code_cache.has_value()) {
+ FPrintF(stderr, "Cannot generate V8 code cache\n");
+ return ExitCode::kGenericUserError;
+ }
+ code_cache = optional_code_cache.value();
+ optional_sv_code_cache = code_cache;
+ }
+
+ std::unordered_map assets;
+ if (!config.assets.empty() && BuildAssets(config.assets, &assets) != 0) {
+ return ExitCode::kGenericUserError;
+ }
+ std::unordered_map assets_view;
+ for (auto const& [key, content] : assets) {
+ assets_view.emplace(key, content);
+ }
+ std::vector exec_argv_view;
+ for (const auto& arg : config.exec_argv) {
+ exec_argv_view.emplace_back(arg);
+ }
+ SeaResource sea{
+ config.flags,
+ config.exec_argv_extension,
+ config.main_path,
+ builds_snapshot_from_main
+ ? std::string_view{snapshot_blob.data(), snapshot_blob.size()}
+ : std::string_view{main_script.data(), main_script.size()},
+ optional_sv_code_cache,
+ assets_view,
+ exec_argv_view};
+
+ SeaSerializer serializer;
+ serializer.Write(sea);
+
+ uv_buf_t buf = uv_buf_init(serializer.sink.data(), serializer.sink.size());
+ r = WriteFileSync(config.output_path.c_str(), buf);
+ if (r != 0) {
+ const char* err = uv_strerror(r);
+ FPrintF(stderr, "Cannot write output to %s:%s\n", config.output_path, err);
+ return ExitCode::kGenericUserError;
+ }
+
+ FPrintF(stderr,
+ "Wrote single executable preparation blob to %s\n",
+ config.output_path);
+ return ExitCode::kNoFailure;
+}
+
+} // anonymous namespace
+
+ExitCode BuildSingleExecutableBlob(const std::string& config_path,
+ const std::vector& args,
+ const std::vector& exec_args) {
+ std::optional config_opt =
+ ParseSingleExecutableConfig(config_path);
+ if (config_opt.has_value()) {
+ ExitCode code =
+ GenerateSingleExecutableBlob(config_opt.value(), args, exec_args);
+ return code;
+ }
+
+ return ExitCode::kGenericUserError;
+}
+
+void GetAsset(const FunctionCallbackInfo& args) {
+ CHECK_EQ(args.Length(), 1);
+ CHECK(args[0]->IsString());
+ Utf8Value key(args.GetIsolate(), args[0]);
+ SeaResource sea_resource = FindSingleExecutableResource();
+ if (sea_resource.assets.empty()) {
+ return;
+ }
+ auto it = sea_resource.assets.find(*key);
+ if (it == sea_resource.assets.end()) {
+ return;
+ }
+ // We cast away the constness here, the JS land should ensure that
+ // the data is not mutated.
+ std::unique_ptr store = ArrayBuffer::NewBackingStore(
+ const_cast(it->second.data()),
+ it->second.size(),
+ [](void*, size_t, void*) {},
+ nullptr);
+ Local ab = ArrayBuffer::New(args.GetIsolate(), std::move(store));
+ args.GetReturnValue().Set(ab);
+}
+
+void GetAssetKeys(const FunctionCallbackInfo& args) {
+ CHECK_EQ(args.Length(), 0);
+ Isolate* isolate = args.GetIsolate();
+ SeaResource sea_resource = FindSingleExecutableResource();
+
+ Local context = isolate->GetCurrentContext();
+ LocalVector keys(isolate);
+ keys.reserve(sea_resource.assets.size());
+ for (const auto& [key, _] : sea_resource.assets) {
+ Local key_str;
+ if (!ToV8Value(context, key).ToLocal(&key_str)) {
+ return;
+ }
+ keys.push_back(key_str);
+ }
+ Local result = Array::New(isolate, keys.data(), keys.size());
+ args.GetReturnValue().Set(result);
+}
+
+MaybeLocal LoadSingleExecutableApplication(
+ const StartExecutionCallbackInfo& info) {
+ // Here we are currently relying on the fact that in NodeMainInstance::Run(),
+ // env->context() is entered.
+ Local context = Isolate::GetCurrent()->GetCurrentContext();
+ Environment* env = Environment::GetCurrent(context);
+ SeaResource sea = FindSingleExecutableResource();
+
+ CHECK(!sea.use_snapshot());
+ // TODO(joyeecheung): this should be an external string. Refactor UnionBytes
+ // and make it easy to create one based on static content on the fly.
+ Local main_script =
+ ToV8Value(env->context(), sea.main_code_or_snapshot).ToLocalChecked();
+ return info.run_cjs->Call(
+ env->context(), Null(env->isolate()), 1, &main_script);
+}
+
+bool MaybeLoadSingleExecutableApplication(Environment* env) {
+#ifndef DISABLE_SINGLE_EXECUTABLE_APPLICATION
+ if (!IsSingleExecutable()) {
+ return false;
+ }
+
+ SeaResource sea = FindSingleExecutableResource();
+
+ if (sea.use_snapshot()) {
+ // The SEA preparation blob building process should already enforce this,
+ // this check is just here to guard against the unlikely case where
+ // the SEA preparation blob has been manually modified by someone.
+ CHECK(!env->snapshot_deserialize_main().IsEmpty());
+ LoadEnvironment(env, StartExecutionCallback{});
+ return true;
+ }
+
+ LoadEnvironment(env, LoadSingleExecutableApplication);
+ return true;
+#else
+ return false;
+#endif
+}
+
+void Initialize(Local
+Welcome to Special Garden Group
+
+
+
+
+autoplay
+loop
+muted
+playsinline
+class="hero-video">
+
+Welcome to join the club
+
+
+
+
+ "
+
+
+
+
+
+
+
+
+
1 / 6
+

+
+
+
2 / 6
+

+
+
+
3 / 6
+

+
+
+
4 / 6
+

+
+
+
5 / 6
+

+
+
+
6 / 6
+

+
+
+
❮
+
❯
+
+
+
+
+
+

+
+
+

+
+
+

+
+
+

+
+
+

+
+
+

+
+
+
+
+
+
+
+
+<
+
+
+
+
+
+
+
+Document
+
+
+
+
+
+ Hero video
+
+
+
+Hello and welcome to Special Garden Group
+
+
+
+
+
+autoplay
+loop
+muted
+playsinline
+class="hero-video">
+
+ Special Garden Group
+
+
+
+video
+
+
+
+
+
+
+
+ Special Garden Group video
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+