From 98789c68c600bc2da4c96f689c7b7d8141206960 Mon Sep 17 00:00:00 2001 From: VK <112831093+vkozio@users.noreply.github.com> Date: Wed, 18 Feb 2026 01:48:15 +0300 Subject: [PATCH 1/2] chore(nodejs): Node 20, tooling, node:test, testing guide --- docs/testing.md | 6 + tools/nodejs_api/CHANGELOG.md | 9 + tools/nodejs_api/CMakeLists.txt | 29 +- tools/nodejs_api/README.md | 290 +++++++++++++++- tools/nodejs_api/build.js | 19 ++ tools/nodejs_api/copy_src_to_build.js | 22 ++ tools/nodejs_api/docs/API.md | 319 ++++++++++++++++++ tools/nodejs_api/docs/database_locked.md | 40 +++ .../docs/execution_chain_analysis.md | 65 ++++ tools/nodejs_api/docs/nodejs_testing.md | 66 ++++ tools/nodejs_api/examples/README.md | 11 + tools/nodejs_api/examples/quickstart.mjs | 32 ++ tools/nodejs_api/examples/stream-load.mjs | 29 ++ tools/nodejs_api/index.js | 4 + tools/nodejs_api/index.mjs | 12 + tools/nodejs_api/install.js | 80 +++-- tools/nodejs_api/package.json | 27 +- .../src_cpp/include/node_connection.h | 8 + .../include/node_progress_bar_display.h | 2 + .../src_cpp/include/node_query_result.h | 4 +- .../src_cpp/include/node_scan_replacement.h | 60 ++++ .../src_cpp/include/node_stream_scan.h | 29 ++ tools/nodejs_api/src_cpp/include/node_util.h | 2 +- tools/nodejs_api/src_cpp/node_connection.cpp | 156 ++++++++- .../src_cpp/node_progress_bar_display.cpp | 8 + .../nodejs_api/src_cpp/node_query_result.cpp | 12 + .../src_cpp/node_scan_replacement.cpp | 134 ++++++++ tools/nodejs_api/src_cpp/node_stream_scan.cpp | 103 ++++++ tools/nodejs_api/src_js/connection.js | 284 +++++++++++++++- tools/nodejs_api/src_js/database.js | 89 ++++- tools/nodejs_api/src_js/index.js | 4 + tools/nodejs_api/src_js/index.mjs | 3 + tools/nodejs_api/src_js/lbug.d.ts | 195 ++++++++++- tools/nodejs_api/src_js/pool.js | 222 ++++++++++++ tools/nodejs_api/src_js/query_result.js | 75 +++- tools/nodejs_api/test/common.js | 64 ++-- tools/nodejs_api/test/test.js | 44 ++- tools/nodejs_api/test/test_concurrency.js | 58 ++-- tools/nodejs_api/test/test_connection.js | 126 ++++++- tools/nodejs_api/test/test_data_type.js | 7 +- tools/nodejs_api/test/test_database.js | 170 +++------- tools/nodejs_api/test/test_parameter.js | 10 +- tools/nodejs_api/test/test_pool.js | 161 +++++++++ tools/nodejs_api/test/test_query_result.js | 72 +++- tools/nodejs_api/test/test_register_stream.js | 79 +++++ tools/nodejs_api/test/test_resilience.js | 184 ++++++++++ tools/nodejs_api/test/test_sync_api.js | 2 - tools/nodejs_api/test/test_version.js | 2 - 48 files changed, 3127 insertions(+), 302 deletions(-) create mode 100644 tools/nodejs_api/CHANGELOG.md create mode 100644 tools/nodejs_api/copy_src_to_build.js create mode 100644 tools/nodejs_api/docs/API.md create mode 100644 tools/nodejs_api/docs/database_locked.md create mode 100644 tools/nodejs_api/docs/execution_chain_analysis.md create mode 100644 tools/nodejs_api/docs/nodejs_testing.md create mode 100644 tools/nodejs_api/examples/README.md create mode 100644 tools/nodejs_api/examples/quickstart.mjs create mode 100644 tools/nodejs_api/examples/stream-load.mjs create mode 100644 tools/nodejs_api/index.js create mode 100644 tools/nodejs_api/index.mjs create mode 100644 tools/nodejs_api/src_cpp/include/node_scan_replacement.h create mode 100644 tools/nodejs_api/src_cpp/include/node_stream_scan.h create mode 100644 tools/nodejs_api/src_cpp/node_scan_replacement.cpp create mode 100644 tools/nodejs_api/src_cpp/node_stream_scan.cpp create mode 100644 tools/nodejs_api/src_js/pool.js create mode 100644 
tools/nodejs_api/test/test_pool.js create mode 100644 tools/nodejs_api/test/test_register_stream.js create mode 100644 tools/nodejs_api/test/test_resilience.js diff --git a/docs/testing.md b/docs/testing.md index da5927a444..3a8a889431 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -35,6 +35,12 @@ TEST_F(MyTest, TestCaseName) { - `test/planner/` - Query planner tests - `test/optimizer/` - Query optimizer tests +## Node.js API + +Tests live in `tools/nodejs_api/test/` and use the Node.js built-in test runner (`node --test`). Run with `npm test` from `tools/nodejs_api/`. + +For guidelines on writing and reviewing these tests, see [Node.js API — Testing Guide](../tools/nodejs_api/docs/nodejs_testing.md). + ## Running Tests See `AGENTS.md` for build and test commands. diff --git a/tools/nodejs_api/CHANGELOG.md b/tools/nodejs_api/CHANGELOG.md new file mode 100644 index 0000000000..853b2df74e --- /dev/null +++ b/tools/nodejs_api/CHANGELOG.md @@ -0,0 +1,9 @@ +## Changelog + +### Unreleased + +- **Breaking:** Drop support for Node.js versions lower than 20; the package now requires **Node.js 20 or later** (`engines.node: ">=20.0.0"`). +- **Breaking:** Upgrade native build tooling to **`cmake-js` ^8.0.0** and **`node-addon-api` ^8.0.0**, aligning with the Node.js 20+ support window. +- Clarify Node.js version requirement in the README. +- Add **Node.js API testing guide** at `tools/nodejs_api/docs/nodejs_testing.md` for test authors and reviewers (assertions, isolation, data types, concurrency, errors, resource lifecycle, validation checklist). Remove `tools/nodejs_api/test/test_correctness_audit.md` in favor of this guide. + diff --git a/tools/nodejs_api/CMakeLists.txt b/tools/nodejs_api/CMakeLists.txt index c1bf5959a5..743259bfca 100644 --- a/tools/nodejs_api/CMakeLists.txt +++ b/tools/nodejs_api/CMakeLists.txt @@ -9,25 +9,42 @@ else() set(NPX_CMD npx) endif() +# Use --log-level error so INFO lines are not captured in OUTPUT_VARIABLE execute_process( - COMMAND ${NPX_CMD} cmake-js print-cmakejs-include + COMMAND ${NPX_CMD} cmake-js print-cmakejs-include --log-level error WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE CMAKE_JS_INC ) execute_process( - COMMAND ${NPX_CMD} cmake-js print-cmakejs-lib + COMMAND ${NPX_CMD} cmake-js print-cmakejs-lib --log-level error WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE CMAKE_JS_LIB ) execute_process( - COMMAND ${NPX_CMD} cmake-js print-cmakejs-src + COMMAND ${NPX_CMD} cmake-js print-cmakejs-src --log-level error WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE CMAKE_JS_SRC ) -string(STRIP ${CMAKE_JS_INC} CMAKE_JS_INC) -string(STRIP ${CMAKE_JS_LIB} CMAKE_JS_LIB) -string(STRIP ${CMAKE_JS_SRC} CMAKE_JS_SRC) +string(STRIP "${CMAKE_JS_INC}" CMAKE_JS_INC) +string(STRIP "${CMAKE_JS_LIB}" CMAKE_JS_LIB) +string(STRIP "${CMAKE_JS_SRC}" CMAKE_JS_SRC) +# Filter out cmake-js INFO lines that may be mixed into stdout +foreach(VAR CMAKE_JS_INC CMAKE_JS_LIB CMAKE_JS_SRC) + string(REPLACE "\n" ";" _LINES "${${VAR}}") + set(_FILTERED "") + foreach(_LINE ${_LINES}) + string(STRIP "${_LINE}" _LINE) + if(_LINE AND NOT _LINE MATCHES "^INFO") + list(APPEND _FILTERED "${_LINE}") + endif() + endforeach() + list(JOIN _FILTERED " " _JOINED) + set(${VAR} "${_JOINED}") +endforeach() +string(STRIP "${CMAKE_JS_INC}" CMAKE_JS_INC) +string(STRIP "${CMAKE_JS_LIB}" CMAKE_JS_LIB) +string(STRIP "${CMAKE_JS_SRC}" CMAKE_JS_SRC) # Print CMAKE_JS variables message(STATUS "CMake.js configurations: LIB=${CMAKE_JS_LIB}, INC=${CMAKE_JS_INC}, 
SRC=${CMAKE_JS_SRC}") diff --git a/tools/nodejs_api/README.md b/tools/nodejs_api/README.md index 086c63eb2f..f1189270c2 100644 --- a/tools/nodejs_api/README.md +++ b/tools/nodejs_api/README.md @@ -7,10 +7,28 @@ A high-performance graph database for knowledge-intensive applications. This Nod ## 📦 Installation +**Node.js version requirement** + +This package **requires Node.js 20 or later**. Older Node.js versions are not supported and installation may fail due to the enforced `engines.node` constraint and native build tooling (`cmake-js` 8.x, `node-addon-api` 8.x). + +**From npm (if published):** + ```bash npm install lbug ``` +**From GitHub** (monorepo; the Node package lives in `tools/nodejs_api`): + +- **pnpm** (v9+), subdirectory is supported: + + ```bash + pnpm add lbug@github:LadybugDB/ladybug#path:tools/nodejs_api + ``` + + On install, the package will build the native addon from source (needs CMake and a C++20 compiler). + +- **npm**: no built-in subdirectory install. Either use a **local path** after cloning and building (see [Build and use in other projects](#-build-and-use-in-other-projects-local)), or a tarball from [GitPkg](https://gitpkg.vercel.app/) (e.g. `https://gitpkg.vercel.app/LadybugDB/ladybug/tools/nodejs_api?main`). + --- ## 🚀 Quick Start @@ -49,10 +67,8 @@ const main = async () => { // Run a query const result = await conn.query("MATCH (u:User) RETURN u.name, u.age;"); - // Fetch all results + // Consume results (choose one style) const rows = await result.getAll(); - - // Output results for (const row of rows) { console.log(row); } @@ -66,14 +82,186 @@ main().catch(console.error); ## 📚 API Overview +**Full API reference:** [docs/API.md](docs/API.md) — types, methods, options, errors, and constants. + The `lbug` package exposes the following primary classes: -* `Database` – Initializes a database from a file path. -* `Connection` – Executes queries on a connected database. -* `QueryResult` – Provides methods like `getAll()` to retrieve results. +* **Database** – `new Database(path, bufferPoolSize?, ...)`. Initialize with `init()` / `initSync()` (optional; done on first use). When the file is locked, **async init() retries for up to 5s** (configurable: last ctor arg `openLockRetryMs`; set `0` to fail immediately). Close with `close()`. +* **Connection** – `new Connection(database, numThreads?)`. Run Cypher with `query(statement)` or `prepare(statement)` then `execute(preparedStatement, params)`. Use `transaction(fn)` for a single write transaction, `ping()` for liveness checks. **`getNumNodes(nodeName)`** and **`getNumRels(relName)`** return row counts for node/rel tables. Use `registerStream(name, source, { columns })` to load data from an AsyncIterable via `LOAD FROM name`; `unregisterStream(name)` when done. Configure with `setQueryTimeout(ms)`, `setMaxNumThreadForExec(n)`. +* **QueryResult** – Returned by `query()` / `execute()`. Consume with `getAll()`, `getNext()` / `hasNext()`, **async iteration** (`for await...of`), or **`toStream()`** (Node.js `Readable`). Use **`toString()`** for a string representation (header + rows; useful for debugging). Metadata: `getColumnNames()`, `getColumnDataTypes()`, `getQuerySummary()`. Call `close()` when done (optional if fully consumed). +* **PreparedStatement** – Created by `conn.prepare(statement)`. Execute with `conn.execute(preparedStatement, params)`. Reuse for parameterized queries. +* **Pool** – `createPool({ databasePath, maxSize, ... })` returns a connection pool. 
Use **`pool.run(conn => ...)`** (recommended) or `acquire()` / `release(conn)`; call **`pool.close()`** when done. Both CommonJS (`require`) and ES Modules (`import`) are fully supported. +### Consuming query results + +```js +const result = await conn.query("MATCH (n:User) RETURN n.name LIMIT 1000"); + +// Option 1: get all rows (loads into memory) +const rows = await result.getAll(); + +// Option 2: row by row (async) +while (result.hasNext()) { + const row = await result.getNext(); + console.log(row); +} + +// Option 3: async iterator (streaming, no full materialization) +for await (const row of result) { + console.log(row); +} + +// Option 4: Node.js Readable stream (e.g. for .pipe()) +const stream = result.toStream(); +stream.on("data", (row) => console.log(row)); + +// Option 5: string representation (e.g. for debugging) +console.log(result.toString()); +``` + +### Table counts + +After creating node/rel tables and loading data, you can get row counts: + +```js +conn.initSync(); // or await conn.init() +const numUsers = conn.getNumNodes("User"); +const numFollows = conn.getNumRels("Follows"); +``` + +### Connection pool + +Use **`createPool(options)`** to get a pool of connections (one shared `Database`, up to `maxSize` connections). Prefer **`pool.run(fn)`**: it acquires a connection, runs `fn(conn)`, and releases in `finally` (on success or throw), so you never leak a connection. + +**Options:** `maxSize` (required), `databasePath`, `databaseOptions` (same shape as `Database` constructor), `minSize` (default 0), `acquireTimeoutMillis` (default 0 = wait forever), `validateOnAcquire` (default false; if true, `conn.ping()` before hand-out). + +**Example (recommended: `run`):** + +```js +import { createPool } from "lbug"; + +const pool = createPool({ databasePath: "./mydb", maxSize: 10 }); + +const rows = await pool.run(async (conn) => { + const result = await conn.query("MATCH (u:User) RETURN u.name LIMIT 5"); + const rows = await result.getAll(); + result.close(); + return rows; +}); +console.log(rows); + +await pool.close(); +``` + +**Manual acquire/release:** If you need the same connection for multiple operations, use `acquire()` and always call `release(conn)` in a `finally` block so the connection is returned even on throw. + +```js +const conn = await pool.acquire(); +try { + await conn.query("..."); + // ... +} finally { + pool.release(conn); +} +``` + +When shutting down, call **`pool.close()`**: it rejects new and pending `acquire()`, then closes all connections and the database. + +### Transactions + +**Manual:** Run `BEGIN TRANSACTION`, then your queries, then `COMMIT` or `ROLLBACK`. On error, call `ROLLBACK` before continuing. + +```js +await conn.query("BEGIN TRANSACTION"); +await conn.query("CREATE NODE TABLE Nodes(id INT64, PRIMARY KEY(id))"); +await conn.query('COPY Nodes FROM "data.csv"'); +await conn.query("COMMIT"); +// or on error: await conn.query("ROLLBACK"); +``` + +**Read-only transaction:** `BEGIN TRANSACTION READ ONLY` then queries, then `COMMIT` / `ROLLBACK`. 
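+ +For example, a minimal read-only sketch (illustrative; assumes the `Nodes` table from the manual example above): + +```js +await conn.query("BEGIN TRANSACTION READ ONLY"); +const countResult = await conn.query("MATCH (n:Nodes) RETURN COUNT(*)"); +console.log(await countResult.getAll()); +await conn.query("COMMIT"); +```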
+ +**Wrapper:** One write transaction with automatic commit on success and rollback on throw: + +```js +await conn.transaction(async () => { + await conn.query("CREATE NODE TABLE Nodes(id INT64, PRIMARY KEY(id))"); + await conn.query('COPY Nodes FROM "data.csv"'); + // commit happens automatically; on throw, rollback then rethrow +}); +``` + +### Loading data from a Node.js stream + +You can feed data from an **AsyncIterable** (generator, async generator, or any `Symbol.asyncIterator`) into Cypher using **scan replacement**: register a stream by name, then use `LOAD FROM name` in your query. Rows are pulled from JavaScript on demand during execution. + +**API:** + +* **`conn.registerStream(name, source, options)`** (async) + * `name` – string used in Cypher: `LOAD FROM name RETURN ...` + * `source` – AsyncIterable of rows. Each row is an **array** of column values (same order as `options.columns`) or an **object** keyed by column name. + * `options.columns` – **required**. Schema: array of `{ name: string, type: string }`. Supported types: `INT64`, `INT32`, `INT16`, `INT8`, `UINT64`, `UINT32`, `DOUBLE`, `FLOAT`, `STRING`, `BOOL`, `DATE`, `TIMESTAMP`. + +* **`conn.unregisterStream(name)`** + Unregisters the source so the name can be reused or to avoid leaving stale entries. Call after the query (or when done with the stream). + +**Example:** + +```js +async function* generateRows() { + yield [1, "Alice"]; + yield [2, "Bob"]; + yield [3, "Carol"]; +} + +await conn.registerStream("users", generateRows(), { + columns: [ + { name: "id", type: "INT64" }, + { name: "name", type: "STRING" }, + ], +}); + +const result = await conn.query("LOAD FROM users RETURN *"); +for await (const row of result) { + console.log(row); // { id: 1, name: "Alice" }, ... +} + +conn.unregisterStream("users"); +``` + +You can combine the stream with other Cypher: e.g. `LOAD FROM stream WHERE col > 0 RETURN *`, or `COPY MyTable FROM (LOAD FROM stream RETURN *)`. + +### Database locked + +Only one process can open the same database path for writing. If the file is already locked, **async `init()` retries for up to 5 seconds** by default (grace period), then throws. You can tune or disable this: + +- **Default**: `new Database("./my.db")` — last ctor arg `openLockRetryMs` defaults to `5000` (retry for up to 5s on lock). +- **No retry**: `new Database("./my.db", 0, true, false, 0, true, -1, true, true, 0)` or pass `openLockRetryMs = 0` as the 10th argument to fail immediately. +- **Custom grace**: e.g. `openLockRetryMs = 3000` to wait up to 3s instead of the default 5s. + +The error has **`code === 'LBUG_DATABASE_LOCKED'`** so you can catch and handle it if the grace period wasn’t enough: + +```js +import { Database, Connection, LBUG_DATABASE_LOCKED } from "lbug"; + +const db = new Database("./my.db"); // already retries ~5s on lock +try { + await db.init(); +} catch (err) { + if (err.code === LBUG_DATABASE_LOCKED) { + console.error("Database still locked after grace period."); + } + throw err; +} +const conn = new Connection(db); +``` + +Use **read-only** mode for concurrent readers: `new Database(path, undefined, undefined, true)` so multiple processes can open the same DB for read. + +See [docs/database_locked.md](docs/database_locked.md) for how other systems handle this and best practices. + --- ## 🛠️ Local Development (for Contributors) @@ -92,10 +280,100 @@ npm run build ### Run Tests +See [docs/nodejs_testing.md](docs/nodejs_testing.md) for guidelines on writing and reviewing tests.
+ ```bash npm test ``` +When developing from the **monorepo root**, build the native addon first so tests see the latest C++ code: + +```bash +# From the repo root +make nodejs +# Or: cmake --build build/release --target lbugjs +# Then from tools/nodejs_api: +cd tools/nodejs_api && npm test +``` + +--- + +## 🔧 Build and use in other projects (local) + +To use the Node.js API from the Ladybug repo in another project without publishing to npm: + +1. **Build the addon** (from the Ladybug repo root): + + ```bash + make nodejs + ``` + + Or from this directory: + + ```bash + npm run build + ``` + + This compiles the native addon into `build/lbugjs.node` and copies JS and types. + +2. **In your other project**, add a file dependency in `package.json`: + + ```json + "dependencies": { + "lbug": "file:../path/to/ladybug/tools/nodejs_api" + } + ``` + + Then run `npm install`. After that, `require("lbug")` or `import ... from "lbug"` will use your local build. + +3. **Optional:** to pack and install a tarball instead: + + ```bash + cd /path/to/ladybug/tools/nodejs_api + npm run build + npm pack + ``` + + In the other project: `npm install /path/to/ladybug/tools/nodejs_api/lbug-0.0.1.tgz`. + +### Prebuilt in your fork (install from GitHub without building) + +If you install from GitHub (e.g. `pnpm add lbug@github:user/ladybug#path:tools/nodejs_api`), the package runs `install.js`: if it finds a prebuilt binary, it uses it and does not build from source. To ship a prebuilt in your fork: + +1. **Build once** in your clone (from repo root): + + ```bash + make nodejs + ``` + +2. **Create the prebuilt file** (name = `lbugjs-<platform>-<arch>.node`): + + - Windows x64: copy `tools/nodejs_api/build/lbugjs.node` → `tools/nodejs_api/prebuilt/lbugjs-win32-x64.node` + - Linux x64: `lbugjs-linux-x64.node` + - macOS x64: `lbugjs-darwin-x64.node`, arm64: `lbugjs-darwin-arm64.node` + + Example (from repo root). **Windows (PowerShell):** + + ```powershell + New-Item -ItemType Directory -Force -Path tools/nodejs_api/prebuilt + Copy-Item tools/nodejs_api/build/lbugjs.node tools/nodejs_api/prebuilt/lbugjs-win32-x64.node + ``` + + **Linux/macOS:** + + ```bash + mkdir -p tools/nodejs_api/prebuilt + cp tools/nodejs_api/build/lbugjs.node tools/nodejs_api/prebuilt/lbugjs-$(node -p "process.platform")-$(node -p "process.arch").node + ``` + +3. **Commit and push** the `prebuilt/` folder. Then anyone (or you in another project) can do: + + ```bash + pnpm add lbug@github:YOUR_USERNAME/ladybug#path:tools/nodejs_api + ``` + + and the addon will be loaded from `prebuilt/` without a local build.
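+ +To sanity-check the installed addon, a quick smoke test (illustrative; uses the documented `Database.getVersion()`): + +```bash +node -e "const { Database } = require('lbug'); console.log(Database.getVersion());" +```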
+ --- ## 📦 Packaging and Binary Distribution diff --git a/tools/nodejs_api/build.js b/tools/nodejs_api/build.js index d326300260..b1b93bae43 100644 --- a/tools/nodejs_api/build.js +++ b/tools/nodejs_api/build.js @@ -3,6 +3,9 @@ const path = require("path"); const { execSync } = require("child_process"); const SRC_PATH = path.resolve(__dirname, "../.."); +const NODEJS_API = path.resolve(__dirname, "."); +const BUILD_DIR = path.join(NODEJS_API, "build"); +const SRC_JS_DIR = path.join(NODEJS_API, "src_js"); const THREADS = require("os").cpus().length; console.log(`Using ${THREADS} threads to build Lbug.`); @@ -12,3 +15,19 @@ execSync(`make nodejs NUM_THREADS=${THREADS}`, { cwd: SRC_PATH, stdio: "inherit", }); + +// Ensure build/ has latest JS from src_js (CMake copies at configure time only) +if (fs.existsSync(SRC_JS_DIR) && fs.existsSync(BUILD_DIR)) { + const files = fs.readdirSync(SRC_JS_DIR); + for (const name of files) { + if (name.endsWith(".js") || name.endsWith(".mjs") || name.endsWith(".d.ts")) { + fs.copyFileSync(path.join(SRC_JS_DIR, name), path.join(BUILD_DIR, name)); + } + } + // So package root has types when used as file: dependency + const dts = path.join(BUILD_DIR, "lbug.d.ts"); + if (fs.existsSync(dts)) { + fs.copyFileSync(dts, path.join(NODEJS_API, "lbug.d.ts")); + } + console.log("Copied src_js to build."); +} diff --git a/tools/nodejs_api/copy_src_to_build.js b/tools/nodejs_api/copy_src_to_build.js new file mode 100644 index 0000000000..8ebe1581c4 --- /dev/null +++ b/tools/nodejs_api/copy_src_to_build.js @@ -0,0 +1,22 @@ +/** + * Copies src_js/*.js, *.mjs, *.d.ts into build/ so tests run with the latest JS + * after "make nodejs" (which only copies at cmake configure time). + * Run from tools/nodejs_api. + */ +const fs = require("fs"); +const path = require("path"); + +const srcDir = path.join(__dirname, "src_js"); +const buildDir = path.join(__dirname, "build"); + +if (!fs.existsSync(buildDir)) { + console.warn("copy_src_to_build: build/ missing, run make nodejs first."); + process.exit(0); +} + +const re = /\.(js|mjs|d\.ts)$/; +const files = fs.readdirSync(srcDir).filter((n) => re.test(n)); +for (const name of files) { + fs.copyFileSync(path.join(srcDir, name), path.join(buildDir, name)); +} +console.log("Copied", files.length, "files from src_js to build."); diff --git a/tools/nodejs_api/docs/API.md b/tools/nodejs_api/docs/API.md new file mode 100644 index 0000000000..f7d22a4da2 --- /dev/null +++ b/tools/nodejs_api/docs/API.md @@ -0,0 +1,319 @@ +# Ladybug Node.js API Reference + +Detailed API documentation for the `lbug` package. For installation, quick start, and usage patterns see [README.md](../README.md). + +--- + +## Module exports + +**CommonJS:** + +```js +const lbug = require("lbug"); +// or +const { Database, Connection, PreparedStatement, QueryResult, createPool, Pool, LBUG_DATABASE_LOCKED, VERSION, STORAGE_VERSION } = require("lbug"); +``` + +**ES Modules:** + +```js +import lbug from "lbug"; +// or +import { Database, Connection, PreparedStatement, QueryResult, createPool, Pool, LBUG_DATABASE_LOCKED, VERSION, STORAGE_VERSION } from "lbug"; +``` + +| Export | Description | +|--------|-------------| +| `Database` | Database instance (path, options). | +| `Connection` | Connection to a database; runs Cypher and manages streams. | +| `PreparedStatement` | Prepared Cypher statement (from `Connection.prepare`). | +| `QueryResult` | Result of `query()` / `execute()`; async iterable, stream, getAll, etc. | +| `createPool` | Factory: `createPool(options)` → `Pool`. 
| | `Pool` | Connection pool (use `createPool`, not `new Pool`). | | `LBUG_DATABASE_LOCKED` | Error code string when DB file is locked. | | `VERSION` | Library version string. | | `STORAGE_VERSION` | Storage version (bigint). | + +--- + +## Types (TypeScript / JSDoc) + +### Value types + +| Type | Description | +|------|-------------| +| `Nullable<T>` | `T \| null` | +| `Callback<T>` | `(error: Error \| null, result?: T) => void` | +| `ProgressCallback` | `(pipelineProgress, numPipelinesFinished, numPipelines) => void` | +| `QueryOptions` | `{ signal?: AbortSignal; progressCallback?: ProgressCallback }` | +| `NodeID` | `{ offset: number; table: number }` | +| `NodeValue` | `{ _label: string \| null; _id: NodeID \| null; [key: string]: any }` | +| `RelValue` | `{ _src, _dst, _label, _id; [key: string]: any }` | +| `RecursiveRelValue` | `{ _nodes: any[]; _rels: any[] }` | +| `LbugValue` | `null \| boolean \| number \| bigint \| string \| Date \| NodeValue \| RelValue \| RecursiveRelValue \| LbugValue[] \| { [key: string]: LbugValue }` | + +### Config types + +| Type | Description | +|------|-------------| +| `SystemConfig` | Database options (bufferPoolSize, enableCompression, readOnly, maxDBSize, autoCheckpoint, checkpointThreshold). | +| `PoolDatabaseOptions` | Same shape as Database constructor options (no path): bufferManagerSize, enableCompression, readOnly, maxDBSize, autoCheckpoint, checkpointThreshold, throwOnWalReplayFailure, enableChecksums, openLockRetryMs. | +| `PoolOptions` | databasePath?, databaseOptions?, minSize?, **maxSize**, acquireTimeoutMillis?, validateOnAcquire?. | +| `QuerySummary` | `{ compilingTime: number; executionTime: number }` (milliseconds). | + +--- + +## Database + +In-process database instance. One database can be shared by multiple `Connection` instances (e.g. in a pool). + +### Constructor + +```ts +new Database( + databasePath?: string, // default ":memory:" + bufferManagerSize?: number, // default 0 + enableCompression?: boolean, // default true + readOnly?: boolean, // default false + maxDBSize?: number, // default 0 + autoCheckpoint?: boolean, // default true + checkpointThreshold?: number, // default -1 + throwOnWalReplayFailure?: boolean, // default true + enableChecksums?: boolean, // default true + openLockRetryMs?: number // default 5000; 0 = fail immediately on lock +) +``` + +- **databasePath**: `":memory:"` or path to directory. Empty/undefined → `":memory:"`. +- **openLockRetryMs**: Only for async `init()`. Retry opening for up to this many ms when file is locked. Ignored for `:memory:`. + +### Instance methods + +| Method | Returns | Description | +|--------|---------|-------------| +| `init()` | `Promise<void>` | Initialize DB (optional; done on first use). Retries on lock for up to `openLockRetryMs`. | +| `initSync()` | `void` | Initialize synchronously; blocks. No retry on lock. | +| `close()` | `Promise<void>` | Close and release resources. | +| `closeSync()` | `void` | Close synchronously. | + +### Static methods + +| Method | Returns | Description | +|--------|---------|-------------| +| `Database.getVersion()` | `string` | Library version. | +| `Database.getStorageVersion()` | `number` | Storage version. | + +### Errors + +- Lock errors on init are normalized to `Error` with `code === LBUG_DATABASE_LOCKED`. See [database_locked.md](database_locked.md). + +--- + +## Connection + +Connection to a `Database`. Use for queries, prepared statements, transactions, streams, and metadata.
### Constructor + +```ts +new Connection(database: Database, numThreads?: number) +``` + +- **numThreads**: Max threads for query execution. Can be set later with `setMaxNumThreadForExec(numThreads)`. + +### Initialization + +| Method | Returns | Description | +|--------|---------|-------------| +| `init()` | `Promise<void>` | Initialize connection (optional; done on first query). | +| `initSync()` | `void` | Initialize synchronously; may block. | + +### Query execution + +| Method | Returns | Description | +|--------|---------|-------------| +| `query(statement, optionsOrProgressCallback?)` | `Promise<QueryResult \| QueryResult[]>` | Execute Cypher. Options: `{ signal?, progressCallback? }`. Rejects with `AbortError` if `signal` aborted. | +| `querySync(statement)` | `QueryResult \| QueryResult[]` | Execute synchronously; blocks. | +| `prepare(statement)` | `Promise<PreparedStatement>` | Prepare a statement. | +| `prepareSync(statement)` | `PreparedStatement` | Prepare synchronously. | +| `execute(preparedStatement, params?, optionsOrProgressCallback?)` | `Promise<QueryResult \| QueryResult[]>` | Execute prepared statement with `params` object. Same options as `query`. | +| `executeSync(preparedStatement, params?)` | `QueryResult \| QueryResult[]` | Execute prepared statement synchronously. | + +**params**: Plain object, e.g. `{ name: "Alice", age: 30 }`. Keys must match parameter names in the prepared Cypher. + +### Transaction + +| Method | Returns | Description | +|--------|---------|-------------| +| `transaction(fn)` | `Promise<T>` | Run `fn()` in a single write transaction. `BEGIN TRANSACTION` → fn() → `COMMIT` on success, `ROLLBACK` on throw. | + +### Configuration and control + +| Method | Returns | Description | +|--------|---------|-------------| +| `setMaxNumThreadForExec(numThreads)` | `void` | Max threads for execution. | +| `setQueryTimeout(timeoutInMs)` | `void` | Query timeout in ms; queries aborted after this. | +| `interrupt()` | `void` | Interrupt current query on this connection. No-op if none running. | + +### Metadata and health + +| Method | Returns | Description | +|--------|---------|-------------| +| `ping()` | `Promise<void>` | Liveness check; rejects if connection broken. | +| `explain(statement)` | `Promise<string>` | Run EXPLAIN on Cypher; returns plan string (one row per line). | +| `getNumNodes(nodeName)` | `number` | Count of nodes in node table. Connection must be initialized. | +| `getNumRels(relName)` | `number` | Count of relationships in rel table. | + +### Stream source (LOAD FROM) + +| Method | Returns | Description | +|--------|---------|-------------| +| `registerStream(name, source, options)` | `Promise<void>` | Register AsyncIterable as `LOAD FROM name`. **options.columns** required: `[{ name, type }]`. Types: INT64, INT32, INT16, INT8, UINT64, UINT32, DOUBLE, FLOAT, STRING, BOOL, DATE, TIMESTAMP. | +| `unregisterStream(name)` | `void` | Unregister stream by name. | + +**source**: AsyncIterable of rows; each row is an array (column order) or object (column names). + +### Lifecycle + +| Method | Returns | Description | +|--------|---------|-------------| +| `close()` | `Promise<void>` | Close connection. | +| `closeSync()` | `void` | Close synchronously. | + +--- + +## PreparedStatement + +Created by `Connection.prepare()` / `Connection.prepareSync()`. Do not construct directly. + +### Instance methods + +| Method | Returns | Description | +|--------|---------|-------------| +| `isSuccess()` | `boolean` | Whether preparation succeeded. | +| `getErrorMessage()` | `string` | Error message if preparation failed.
| + +Execution is via `conn.execute(preparedStatement, params)` or `conn.executeSync(preparedStatement, params)`. If `!isSuccess()`, `execute` rejects with `getErrorMessage()`. + +--- + +## QueryResult + +Returned by `Connection.query()`, `Connection.querySync()`, `Connection.execute()`, `Connection.executeSync()`. Implements `AsyncIterable<Record<string, LbugValue> | null>`. + +### Consumption (pick one style) + +| Method / usage | Returns | Description | +|----------------|---------|-------------| +| `getAll()` | `Promise<Record<string, LbugValue>[]>` | All rows (loads into memory). | +| `getAllSync()` | `Record<string, LbugValue>[]` | All rows synchronously. | +| `getNext()` | `Promise<Record<string, LbugValue> \| null>` | Next row; null when exhausted. | +| `getNextSync()` | `Record<string, LbugValue> \| null` | Next row synchronously. | +| `hasNext()` | `boolean` | Whether more rows exist. | +| `for await (const row of result)` | — | Async iteration; no full materialization. | +| `toStream()` | `stream.Readable` | Node.js Readable (object mode), one row per chunk. | +| `each(resultCb, doneCb, errorCb)` | `void` | Callback-based iteration. | +| `all(resultCb, errorCb)` | `void` | Callback with all rows. | +| `toString()` | `string` | Header + rows (or error message for failed query). | + +### Metadata + +| Method | Returns | Description | +|--------|---------|-------------| +| `getNumTuples()` | `number` | Number of rows. | +| `getColumnNames()` | `Promise<string[]>` | Column names. | +| `getColumnNamesSync()` | `string[]` | Column names synchronously. | +| `getColumnDataTypes()` | `Promise<string[]>` | Column data types. | +| `getColumnDataTypesSync()` | `string[]` | Column types synchronously. | +| `getQuerySummary()` | `Promise<QuerySummary>` | `{ compilingTime, executionTime }` in ms. | +| `getQuerySummarySync()` | `QuerySummary` | Same, synchronously. | + +### Other + +| Method | Returns | Description | +|--------|---------|-------------| +| `resetIterator()` | `void` | Reset cursor to start (for re-iteration). | +| `close()` | `void` | Release resources. Optional if fully consumed. | + +**Multiple results**: A batch of statements can return `QueryResult[]`. Single statement returns one `QueryResult`. + +--- + +## Pool and createPool + +Connection pool: one shared `Database`, up to `maxSize` `Connection` instances. + +### createPool(options) + +```ts +function createPool(options: PoolOptions): Pool +``` + +**PoolOptions:** + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `databasePath` | string | `":memory:"` | DB path. | +| `databaseOptions` | PoolDatabaseOptions | — | Same shape as Database constructor (no path). | +| `minSize` | number | 0 | Minimum connections to keep. | +| `maxSize` | number | **required** | Maximum connections. | +| `acquireTimeoutMillis` | number | 0 | Max wait for acquire (0 = wait forever). | +| `validateOnAcquire` | boolean | false | If true, call `conn.ping()` before handing out. | + +### Pool methods + +| Method | Returns | Description | +|--------|---------|-------------| +| `acquire()` | `Promise<Connection>` | Get a connection; **must** call `release(conn)` when done. | +| `release(conn)` | `void` | Return connection to pool. | +| `run(fn)` | `Promise<T>` | Acquire, run `fn(conn)`, release in `finally`. Preferred over manual acquire/release. | +| `close()` | `Promise<void>` | Reject new/pending acquire; close all connections and database.
| + +**Example:** + +```js +const pool = createPool({ databasePath: "./mydb", maxSize: 10 }); +const rows = await pool.run(async (conn) => { + const result = await conn.query("MATCH (u:User) RETURN u.name LIMIT 5"); + const rows = await result.getAll(); + result.close(); + return rows; +}); +await pool.close(); +``` + +--- + +## Constants + +| Name | Type | Description | +|------|------|-------------| +| `LBUG_DATABASE_LOCKED` | `"LBUG_DATABASE_LOCKED"` | Error code when DB file is locked. Use with `err.code === LBUG_DATABASE_LOCKED`. | +| `VERSION` | string | Library version (same as `Database.getVersion()`). | +| `STORAGE_VERSION` | bigint | Storage version (same as `Database.getStorageVersion()`). | + +--- + +## Query options and cancellation + +- **signal**: Pass `AbortSignal` (e.g. from `AbortController`) in options to cancel `query()` or `execute()`. On abort, the promise rejects with `DOMException` "AbortError". +- **progressCallback**: `(pipelineProgress, numPipelinesFinished, numPipelines) => void`. Optional progress updates during execution. + +Legacy: instead of an options object, you can pass a single function as the last argument to `query(statement, progressCallback)` or `execute(ps, params, progressCallback)`. + +--- + +## Error handling + +- **Database lock**: Async `init()` retries for `openLockRetryMs` (default 5s). Then throws with `code === LBUG_DATABASE_LOCKED`. See [database_locked.md](database_locked.md). + +- **Abort**: When `options.signal` is aborted, `query`/`execute` reject with `DOMException` "AbortError". +- **Prepared statement**: If `!preparedStatement.isSuccess()`, `execute` rejects with `preparedStatement.getErrorMessage()`. +- **Validation**: Invalid arguments (e.g. non-object params, wrong types) throw `Error` with descriptive messages. + +--- + +## Related docs + +- [README.md](../README.md) — Installation, quick start, transactions, stream loading, pool usage, prebuilt binaries. +- [database_locked.md](database_locked.md) — Lock behavior, retry, read-only, best practices. +- [execution_chain_analysis.md](execution_chain_analysis.md) — LOAD FROM stream execution chain (for implementers). diff --git a/tools/nodejs_api/docs/database_locked.md b/tools/nodejs_api/docs/database_locked.md new file mode 100644 index 0000000000..164adc3f9c --- /dev/null +++ b/tools/nodejs_api/docs/database_locked.md @@ -0,0 +1,40 @@ +# Database locked + +## When it happens + +The database file is locked when: + +- Another process has already opened the same path for read-write (e.g. another Node app, the Ladybug shell, or a backup). +- You open the same path twice in one process (e.g. two `Database` instances to the same path) and both try to write. + +Opening is done at the first use: `db.init()`, `db.initSync()`, or the first `conn.query()` on that database. If the OS file lock cannot be acquired, the native layer throws and the Node API surfaces it as an **Error with `code === 'LBUG_DATABASE_LOCKED'`**. + +## How other systems handle it + +| System | Approach | +|----------|----------| +| **SQLite** | `busy_timeout` (e.g. 5 seconds): block until lock is released or timeout, then return `SQLITE_BUSY`. Apps often retry with exponential backoff. | +| **DuckDB** | Open fails immediately if locked; application retries with backoff. | +| **LMDB** | Single writer; readers use `MDB_NOLOCK` or shared lock. Writers get exclusive lock. | +| **RocksDB** | Options for concurrent access; single process or client–server. | + +Common patterns: + +1. **Fail fast** — return a clear error (e.g.
“database locked”) so the app can show a message or retry. +2. **Retry with backoff** — in application code: catch the error, wait (e.g. 50 ms, 100 ms, 200 ms), try again, then give up. +3. **Block with timeout** — wait up to N ms for the lock (requires support in the engine; Ladybug currently uses “fail immediately”). +4. **Read-only for readers** — open in read-only mode so multiple processes can read; only one writer. + +## What the Node API does + +- **Grace period (async init only)**: When you open a database with async `init()` (or the first `query()`), the driver **retries for up to 5 seconds** by default if the file is locked. So short-lived contention (e.g. MCP server or another tool briefly holding the lock) often succeeds without you doing anything. Configure with the last constructor argument `openLockRetryMs` (default `5000`; set `0` to fail immediately). +- **Clear error**: After the grace period or when retry is disabled, you get an Error whose message includes “Could not set lock on file” and a link to the concurrency docs. +- **Error code**: The error is normalized so `err.code === 'LBUG_DATABASE_LOCKED'`. You can import `LBUG_DATABASE_LOCKED` from `lbug` and catch it if you need custom retry or messaging. +- **Sync init**: `initSync()` does not retry; it fails immediately on lock (no blocking wait in the driver). + +## Best practices + +1. **One writer per path** — avoid opening the same on-disk database for write from more than one process at a time. +2. **Concurrent readers** — use `new Database(path, undefined, undefined, true)` (read-only) so multiple processes can read the same DB. +3. **Retry with backoff** — if you expect short-lived contention (e.g. restart or another tool), catch `LBUG_DATABASE_LOCKED`, wait, and retry a few times. +4. **Close when done** — call `db.close()` so the lock is released for other processes. 
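+ +A minimal sketch of practice 3, retry with backoff (illustrative; the helper name, attempt count, and delays are arbitrary — the delays mirror the 50 ms / 100 ms / 200 ms pattern above): + +```js +import { Database, LBUG_DATABASE_LOCKED } from "lbug"; + +async function openWithRetry(path, attempts = 3) { + for (let i = 0; ; i++) { + const db = new Database(path); // async init() below already retries ~5s on lock + try { + await db.init(); + return db; + } catch (err) { + if (err.code !== LBUG_DATABASE_LOCKED || i >= attempts - 1) throw err; + await new Promise((r) => setTimeout(r, 50 * 2 ** i)); // 50 ms, 100 ms, 200 ms + } + } +} +```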
diff --git a/tools/nodejs_api/docs/execution_chain_analysis.md b/tools/nodejs_api/docs/execution_chain_analysis.md new file mode 100644 index 0000000000..8e633201bc --- /dev/null +++ b/tools/nodejs_api/docs/execution_chain_analysis.md @@ -0,0 +1,65 @@ +# LOAD FROM stream: Execution Chain (reference & recommendations) + +## Execution chain diagram + +```mermaid +%%{init: {'flowchart': {'defaultRenderer': 'elk', 'elk': {'direction': 'DOWN'}}}}%% +flowchart TB + subgraph JS["JS (main thread)"] + direction TB + A["query('LOAD FROM name RETURN *')"] + B["registerStream: getChunk(requestId) → pending.push; runConsumer via setImmediate"] + C["runConsumer: sort pending, for each id take it.next(), returnChunk(id, rows, done)"] + D["AsyncIterator: it.next() → yield rows"] + A --> B + B --> C + C --> D + end + + subgraph CppAddon["C++ addon (Node worker thread)"] + direction TB + E["tableFunc: mutex, nextRequestId(), setChunkRequest, BlockingCall(getChunk)"] + F["wait reqPtr->cv until filled"] + G["returnChunkFromJS: req->rows, filled=true, cv.notify_one"] + H["Copy rows to output.dataChunk, return cap"] + E --> F + G --> F + F --> H + end + + subgraph Engine["Engine (single task thread, canParallelFunc=false)"] + direction TB + I["getNextTuple → getNextTuplesInternal"] + J["tableFunc(input, output) → numTuplesScanned"] + K["FactorizedTable accumulates chunks"] + L["MaterializedQueryResult + FactorizedTableIterator"] + I --> J + J --> K + K --> L + end + + J --> E + E --> B + C --> G + H --> J + L --> M["JS hasNext / getNext"] + M --> A +``` + +--- + +## Useful observations + +- **Order**: With `canParallelFunc = false`, one engine thread calls `tableFunc` sequentially. Request IDs are assigned under mutex; JS `runConsumer` sorts `pending` and serves chunks by `requestId`, so iterator order is preserved. +- **End of stream**: Engine calls `tableFunc` until it returns 0. JS sends `returnChunk(id, [], true)` when the iterator is done; C++ returns 0 and the engine stops. No extra call after 0. +- **getNext contract**: Core `getNext()` throws if `!hasNext()`. Addon always checks `hasNext()` before `getNext()` and returns `null` when exhausted so that the JS API matches `getNext(): Promise<Record<string, LbugValue> | null>`. + +--- + +## Recommendations for the future + +1. **Keep `canParallelFunc = false`** for the node stream table function. Enabling parallelism would require a deterministic merge of chunks by requestId on the engine side; until then, single-thread keeps order and avoids subtle bugs. +2. **Any new code path that reads rows** (e.g. another language binding or helper) must guard with `hasNext()` before `getNext()`; core will throw otherwise. +3. **Mutex in `tableFunc`**: Currently redundant with single-thread execution but harmless. If parallelism is ever introduced, either remove the mutex and solve ordering in the engine or keep it and document that the stream source is intentionally serialized. +4. **Tests**: Prefer iterating with `hasNext()` + `getNext()` and asserting `getNext()` returns `null` exactly when `hasNext()` becomes false, to lock the contract (see `test_query_result.js`). +5. **Rebuild and full test run** (e.g. `register_stream` + `query_result`) after any change in the addon or engine table function path.
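+ +A minimal sketch of the iteration contract from recommendation 4 (illustrative; assumes any small seeded result): + +```js +const result = await conn.query("RETURN 1 AS a"); +while (result.hasNext()) { + const row = await result.getNext(); // non-null while hasNext() is true + console.assert(row !== null); +} +// once hasNext() is false, getNext() resolves to null instead of throwing +console.assert((await result.getNext()) === null); +```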
diff --git a/tools/nodejs_api/docs/nodejs_testing.md b/tools/nodejs_api/docs/nodejs_testing.md new file mode 100644 index 0000000000..5016e8abad --- /dev/null +++ b/tools/nodejs_api/docs/nodejs_testing.md @@ -0,0 +1,66 @@ +# Node.js API — Testing Guide + +Guidelines for writing and reviewing tests for the Node.js API (`tools/nodejs_api/`). Use this when adding or changing tests to keep the suite correct, isolated, and maintainable. + +--- + +## 1. Assertions and Oracles + +- **Strict equality:** The test shim’s `assert.equal(a, b)` is `strictEqual`. A number and a string (e.g. `1234` vs `'1234'`) are not equal. When the API or DB returns strings (e.g. `current_setting()`), coerce before comparing: `Number(tuple["checkpoint_threshold"]) === 1234`, or compare to the expected string. +- **Floating point:** Use `assert.approximately(actual, expected, delta)` for FLOAT/DOUBLE. Choose a small delta (e.g. `1e-6`). NaN is not considered approximately equal to anything; that is intentional. +- **Arrays in assertions:** In JavaScript, `(a, b)` is the comma operator and evaluates to `b`. Never write `assert.deepEqual(x, [(10, 8)])` — that compares `x` to `[8]`. Use `assert.deepEqual(x, [10, 8])`. +- **API return types:** If the API can return either a value or a list (e.g. single value vs array), assert the actual shape (e.g. `assert.deepEqual(result["usedNames"], ["Aida"])` if the API returns an array). +- **Naming:** Use clear variable names (e.g. `expectedResultArr` instead of typo-prone names) so assertions stay readable. + +--- + +## 2. Test Isolation and Shared State + +- **Assert the right object:** If the test creates its own database or connection (e.g. `testDb`, `testConn`), run config or data checks **against that instance**, not the global `db`/`conn`. Using global `conn` in a test that built `testDb` checks the wrong database and is a logic bug. +- **Prefer a dedicated connection for local DBs:** When testing options of a newly created database, create a connection to that database, run the query, then close both the connection and the database. +- **Close what you open:** If a test creates a connection or database, close it in the same test (or in a reliable `finally`/hook). Leaving connections or databases open can leak handles and affect other tests or the process. +- **Shared fixtures:** Tests that use the global `db`/`conn` from `before()` are fine for read-only or shared-scenario tests; just don’t use them to verify state of a different, locally created DB/conn. + +--- + +## 3. Data Types and Boundaries + +- **Settings as strings:** `current_setting()` returns strings (e.g. `'1234'`, `'False'`). For numeric checks use `Number(...)`; for booleans compare to the string the backend returns (e.g. `"False"`). +- **Large integers:** Values above `Number.MAX_SAFE_INTEGER` (2^53) can lose precision in JavaScript. For UINT64/INT64 round-trip tests with very large values, a short comment is helpful (e.g. that values > 2^53 may be lossy in JS). +- **Column names:** Tests that depend on exact column names (e.g. from `RETURN CAST($1, 'UINT64')`) will break if the backend changes display names. Prefer stable API contracts when possible; otherwise document the dependency. + +--- + +## 4. Concurrency and Timing + +- **Time-based races:** Tests that close a connection or DB after a short delay (e.g. 80 ms) then assert “query rejects” can be flaky on slow CI. Use a timeout (e.g. 2 s) so the test fails fast if the query never rejects, and consider slightly longer delays on CI if needed. 
+- **Node.js test runner timeout:** For long-running tests (e.g. interrupt), set timeout via the test option: `it("...", { timeout: 5000 }, async function () { ... })`. The test context in `node:test` does not provide `this.timeout()`. +- **Concurrent queries:** When running multiple queries in parallel on the same connection, assert results against known stable data (e.g. fixed IDs) and avoid shared mutable state. + +--- + +## 5. Error Messages and API Contracts + +- **Exact vs partial match:** `assert.equal(e.message, "exact string")` is brittle if the backend changes wording. For stability, prefer `assert.include(e.message, keyPhrase)` or similar when the exact text is not part of the public API contract. +- **Resilience tests:** Checking that the error message contains “closed” or “not allowed” is a good balance between stability and coverage. + +--- + +## 6. Resource Lifecycle and Cleanup + +- **Databases and connections:** Every database or connection created in a test should be closed in that test (or in a `finally`/hook that always runs). This includes “positive” tests (e.g. “should create a database with valid path and no buffer size”). +- **Query results:** Prefer calling `res.close()` when a test opens many results (e.g. concurrency) or when the test is long-lived. Relying on GC alone can hide leaks. +- **Temp directories:** Use a helper (e.g. `withTempDb`) that creates a temp DB/conn, runs the test, and in `finally` closes them and removes the temp path. Avoid leaving temp dirs or DBs open. +- **process.exit(0):** If the test runner uses `process.exit(0)` in `after()` to avoid the event loop hanging (e.g. due to the native addon), document it; it can mask unclosed resources, so use only when necessary. + +--- + +## 7. Validation Checklist + +Before submitting test changes: + +- [ ] Run the full suite: `npm test` (from `tools/nodejs_api/`). +- [ ] If you test against an installed package, run with `TEST_INSTALLED=1` as applicable. +- [ ] For tests that create a local DB or connection, ensure config/data assertions use that instance, not the global `db`/`conn`. +- [ ] Ensure no comma-operator traps in assertions: no `(a, b)` used as an array element in `deepEqual`/`equal`. +- [ ] All resources (DB, connection) created in the test are closed in the same test or a guaranteed cleanup path. diff --git a/tools/nodejs_api/examples/README.md b/tools/nodejs_api/examples/README.md new file mode 100644 index 0000000000..63ace4baf5 --- /dev/null +++ b/tools/nodejs_api/examples/README.md @@ -0,0 +1,11 @@ +# Examples + +Run from `tools/nodejs_api` (after `make nodejs` or `npm run build`): + +```bash +node examples/quickstart.mjs +node examples/stream-load.mjs +``` + +- **quickstart.mjs** — In-memory DB, create table, load data from a stream via `COPY FROM (LOAD FROM ...)`, then query. +- **stream-load.mjs** — Register an async iterable and consume it with `LOAD FROM name RETURN *`. diff --git a/tools/nodejs_api/examples/quickstart.mjs b/tools/nodejs_api/examples/quickstart.mjs new file mode 100644 index 0000000000..d78ec07f9a --- /dev/null +++ b/tools/nodejs_api/examples/quickstart.mjs @@ -0,0 +1,32 @@ +/** + * Quickstart: in-memory database, create schema, load from stream, query. 
+ * Run from tools/nodejs_api: node examples/quickstart.mjs + */ +import { Database, Connection } from "lbug"; + +async function* userRows() { + yield ["Alice", 30]; + yield ["Bob", 25]; +} + +const db = new Database(":memory:"); +const conn = new Connection(db); + +await conn.query(` + CREATE NODE TABLE User(name STRING, age INT64, PRIMARY KEY (name)); +`); + +await conn.registerStream("users", userRows(), { + columns: [ + { name: "name", type: "STRING" }, + { name: "age", type: "INT64" }, + ], +}); +await conn.query("COPY User FROM (LOAD FROM users RETURN *)"); +conn.unregisterStream("users"); + +const result = await conn.query("MATCH (u:User) RETURN u.name, u.age;"); +const rows = await result.getAll(); +console.log(rows); + +await db.close(); diff --git a/tools/nodejs_api/examples/stream-load.mjs b/tools/nodejs_api/examples/stream-load.mjs new file mode 100644 index 0000000000..9adc84f8a9 --- /dev/null +++ b/tools/nodejs_api/examples/stream-load.mjs @@ -0,0 +1,29 @@ +/** + * Load data from a JavaScript async iterable via LOAD FROM. + * Run from tools/nodejs_api: node examples/stream-load.mjs + */ +import { Database, Connection } from "lbug"; + +async function* generateRows() { + yield [1, "Alice"]; + yield [2, "Bob"]; + yield [3, "Carol"]; +} + +const db = new Database(":memory:"); +const conn = new Connection(db); + +await conn.registerStream("users", generateRows(), { + columns: [ + { name: "id", type: "INT64" }, + { name: "name", type: "STRING" }, + ], +}); + +const result = await conn.query("LOAD FROM users RETURN *"); +for await (const row of result) { + console.log(row); +} + +conn.unregisterStream("users"); +await db.close(); diff --git a/tools/nodejs_api/index.js b/tools/nodejs_api/index.js new file mode 100644 index 0000000000..04ea8a1618 --- /dev/null +++ b/tools/nodejs_api/index.js @@ -0,0 +1,4 @@ +"use strict"; + +// After `make nodejs` or `npm run build`, entry point is build/ +module.exports = require("./build"); diff --git a/tools/nodejs_api/index.mjs b/tools/nodejs_api/index.mjs new file mode 100644 index 0000000000..54fd3b6774 --- /dev/null +++ b/tools/nodejs_api/index.mjs @@ -0,0 +1,12 @@ +export { + default, + Database, + Connection, + PreparedStatement, + QueryResult, + createPool, + Pool, + LBUG_DATABASE_LOCKED, + VERSION, + STORAGE_VERSION, +} from "./build/index.mjs"; diff --git a/tools/nodejs_api/install.js b/tools/nodejs_api/install.js index 6a010b335a..cbf69ea41d 100644 --- a/tools/nodejs_api/install.js +++ b/tools/nodejs_api/install.js @@ -8,60 +8,100 @@ const process = require("process"); const isNpmBuildFromSourceSet = process.env.npm_config_build_from_source; const platform = process.platform; const arch = process.arch; + +// Skip when already built (e.g. local dev after make nodejs) +if (fsCallback.existsSync(path.join(__dirname, "build", "lbugjs.node"))) { + process.exit(0); +} + const prebuiltPath = path.join( __dirname, "prebuilt", `lbugjs-${platform}-${arch}.node` ); +const buildDir = path.join(__dirname, "build"); +const srcJsDir = path.join(__dirname, "src_js"); +const lbugSourceDir = path.join(__dirname, "lbug-source"); + // Check if building from source is forced if (isNpmBuildFromSourceSet) { console.log( "The NPM_CONFIG_BUILD_FROM_SOURCE environment variable is set. Building from source." 
); } -// Check if prebuilt binaries are available +// Prebuilt available + git-clone layout (src_js present, no lbug-source): use prebuilt and copy src_js → build/ +else if (fsCallback.existsSync(prebuiltPath) && fsCallback.existsSync(srcJsDir)) { + console.log("Prebuilt binary is available (git clone layout)."); + if (!fsCallback.existsSync(buildDir)) { + fsCallback.mkdirSync(buildDir, { recursive: true }); + } + fs.copyFileSync(prebuiltPath, path.join(buildDir, "lbugjs.node")); + const jsFiles = fs.readdirSync(srcJsDir).filter((file) => { + return file.endsWith(".js") || file.endsWith(".mjs") || file.endsWith(".d.ts"); + }); + for (const file of jsFiles) { + fs.copyFileSync(path.join(srcJsDir, file), path.join(buildDir, file)); + } + console.log("Done! Prebuilt + JS copied to build/."); + process.exit(0); +} +// Prebuilt available + tarball layout (lbug-source present): copy to root (legacy publish flow) else if (fsCallback.existsSync(prebuiltPath)) { console.log("Prebuilt binary is available."); - console.log("Copying prebuilt binary to package directory..."); fs.copyFileSync(prebuiltPath, path.join(__dirname, "lbugjs.node")); - console.log( - `Copied ${prebuiltPath} -> ${path.join(__dirname, "lbugjs.node")}.` - ); - console.log("Copying JS files to package directory..."); - const jsSourceDir = path.join( - __dirname, - "lbug-source", - "tools", - "nodejs_api", - "src_js" - ); + const jsSourceDir = path.join(lbugSourceDir, "tools", "nodejs_api", "src_js"); const jsFiles = fs.readdirSync(jsSourceDir).filter((file) => { return file.endsWith(".js") || file.endsWith(".mjs") || file.endsWith(".d.ts"); }); - console.log("Files to copy: "); - for (const file of jsFiles) { - console.log(" " + file); - } for (const file of jsFiles) { fs.copyFileSync(path.join(jsSourceDir, file), path.join(__dirname, file)); } - console.log("Copied JS files to package directory."); console.log("Done!"); process.exit(0); } else { console.log("Prebuilt binary is not available, building from source..."); } +if (!fsCallback.existsSync(lbugSourceDir)) { + // Full git clone (e.g. CI Windows): no lbug-source; install deps only; build via "make nodejs" from repo root. + const repoRoot = path.join(__dirname, "..", ".."); + const repoCmake = path.join(repoRoot, "CMakeLists.txt"); + if (fsCallback.existsSync(repoCmake)) { + console.log("Full clone layout: installing dependencies only. Run 'make nodejs' from repo root to build."); + const nodeModulesDir = path.join(__dirname, "node_modules"); + const lockFile = path.join(__dirname, "package-lock.json"); + if (fsCallback.existsSync(nodeModulesDir)) { + fsCallback.rmSync(nodeModulesDir, { recursive: true, force: true }); + } + if (fsCallback.existsSync(lockFile)) { + fsCallback.unlinkSync(lockFile); + } + const env = { ...process.env, NPM_CONFIG_IGNORE_SCRIPTS: "true" }; + childProcess.execSync("npm install --ignore-scripts --legacy-peer-deps", { cwd: __dirname, stdio: "inherit", env }); + process.exit(0); + } + console.error( + "lbug-source/ not found (install from git clone). Add prebuilt binary to prebuilt/lbugjs-" + + platform + + "-" + + arch + + ".node and commit, or install from a full clone and build there." + ); + process.exit(1); +} + // Get number of threads const THREADS = os.cpus().length; console.log(`Using ${THREADS} threads to build Lbug.`); -// Install dependencies +// Install dependencies only; skip install script so nested install.js does not run (no lbug-source there). 
console.log("Installing dependencies..."); -childProcess.execSync("npm install", { +const innerNpmEnv = { ...process.env, NPM_CONFIG_IGNORE_SCRIPTS: "true" }; +childProcess.execSync("npm install --ignore-scripts --legacy-peer-deps", { cwd: path.join(__dirname, "lbug-source", "tools", "nodejs_api"), stdio: "inherit", + env: innerNpmEnv, }); // Build the Lbug source code diff --git a/tools/nodejs_api/package.json b/tools/nodejs_api/package.json index f6ff61a8b3..24212114a8 100644 --- a/tools/nodejs_api/package.json +++ b/tools/nodejs_api/package.json @@ -5,14 +5,19 @@ "main": "index.js", "module": "./index.mjs", "types": "./lbug.d.ts", - "exports":{ - ".":{ + "exports": { + ".": { "require": "./index.js", "import": "./index.mjs", "types": "./lbug.d.ts" } }, - "files": ["index.js", "index.mjs", "lbug.d.ts", "lbugjs.node"], + "files": [ + "index.js", + "index.mjs", + "lbug.d.ts", + "lbugjs.node" + ], "type": "commonjs", "homepage": "https://ladybugdb.com/", "repository": { @@ -20,7 +25,8 @@ "url": "git+https://github.com/LadybugDB/ladybug.git" }, "scripts": { - "test": "mocha test --timeout 20000", + "install": "node install.js", + "test": "node --test test/test.js --test-timeout=20000", "clean": "node clean.js", "clean-all": "node clean.js all", "build": "node build.js", @@ -28,13 +34,12 @@ }, "author": "Ladybug Team", "license": "MIT", - "devDependencies": { - "chai": "^4.4.1", - "mocha": "^10.4.0", - "tmp": "^0.2.3" + "engines": { + "node": ">=20.0.0" }, + "devDependencies": {}, "dependencies": { - "cmake-js": "^7.3.0", - "node-addon-api": "^6.0.0" + "cmake-js": "^8.0.0", + "node-addon-api": "^8.5.0" } -} +} \ No newline at end of file diff --git a/tools/nodejs_api/src_cpp/include/node_connection.h b/tools/nodejs_api/src_cpp/include/node_connection.h index caacef92c3..17c94fa7b0 100644 --- a/tools/nodejs_api/src_cpp/include/node_connection.h +++ b/tools/nodejs_api/src_cpp/include/node_connection.h @@ -8,6 +8,7 @@ #include "node_prepared_statement.h" #include "node_progress_bar_display.h" #include "node_query_result.h" +#include "node_scan_replacement.h" #include using namespace lbug::main; @@ -30,15 +31,22 @@ class NodeConnection : public Napi::ObjectWrap { void InitCppConnection(); void SetMaxNumThreadForExec(const Napi::CallbackInfo& info); void SetQueryTimeout(const Napi::CallbackInfo& info); + void Interrupt(const Napi::CallbackInfo& info); Napi::Value ExecuteAsync(const Napi::CallbackInfo& info); Napi::Value QueryAsync(const Napi::CallbackInfo& info); Napi::Value ExecuteSync(const Napi::CallbackInfo& info); Napi::Value QuerySync(const Napi::CallbackInfo& info); void Close(const Napi::CallbackInfo& info); + Napi::Value RegisterStream(const Napi::CallbackInfo& info); + void UnregisterStream(const Napi::CallbackInfo& info); + void ReturnChunk(const Napi::CallbackInfo& info); + Napi::Value GetNumNodes(const Napi::CallbackInfo& info); + Napi::Value GetNumRels(const Napi::CallbackInfo& info); private: std::shared_ptr database; std::shared_ptr connection; + std::unique_ptr streamRegistry; }; class ConnectionInitAsyncWorker : public Napi::AsyncWorker { diff --git a/tools/nodejs_api/src_cpp/include/node_progress_bar_display.h b/tools/nodejs_api/src_cpp/include/node_progress_bar_display.h index 7813820cb0..e777c94af5 100644 --- a/tools/nodejs_api/src_cpp/include/node_progress_bar_display.h +++ b/tools/nodejs_api/src_cpp/include/node_progress_bar_display.h @@ -13,6 +13,8 @@ using namespace common; */ class NodeProgressBarDisplay : public ProgressBarDisplay { public: + ~NodeProgressBarDisplay() 
override;
+    void updateProgress(uint64_t queryID, double newPipelineProgress,
+        uint32_t newNumPipelinesFinished) override;
diff --git a/tools/nodejs_api/src_cpp/include/node_query_result.h b/tools/nodejs_api/src_cpp/include/node_query_result.h
index b9ee4db979..0b07c97b49 100644
--- a/tools/nodejs_api/src_cpp/include/node_query_result.h
+++ b/tools/nodejs_api/src_cpp/include/node_query_result.h
@@ -37,6 +37,7 @@ class NodeQueryResult : public Napi::ObjectWrap<NodeQueryResult> {
     Napi::Value GetColumnNamesSync(const Napi::CallbackInfo& info);
     Napi::Value GetQuerySummarySync(const Napi::CallbackInfo& info);
     Napi::Value GetQuerySummaryAsync(const Napi::CallbackInfo& info);
+    Napi::Value GetToStringSync(const Napi::CallbackInfo& info);
     void PopulateColumnNames();
     void Close(const Napi::CallbackInfo& info);
     void Close();
@@ -102,6 +103,7 @@ class NodeQueryResultGetNextAsyncWorker : public Napi::AsyncWorker {
         try {
             if (!nodeQueryResult->queryResult->hasNext()) {
                 cppTuple.reset();
+                return;
             }
             cppTuple = nodeQueryResult->queryResult->getNext();
         } catch (const std::exception& exc) {
@@ -112,7 +114,7 @@
     inline void OnOK() override {
         auto env = Env();
         if (cppTuple == nullptr) {
-            Callback().Call({env.Null(), env.Undefined()});
+            Callback().Call({env.Null(), env.Null()});
             return;
         }
         Napi::Object nodeTuple = Napi::Object::New(env);
diff --git a/tools/nodejs_api/src_cpp/include/node_scan_replacement.h b/tools/nodejs_api/src_cpp/include/node_scan_replacement.h
new file mode 100644
index 0000000000..bd0109005a
--- /dev/null
+++ b/tools/nodejs_api/src_cpp/include/node_scan_replacement.h
@@ -0,0 +1,60 @@
+#pragma once
+
+#include <condition_variable>
+#include <cstdint>
+#include <memory>
+#include <mutex>
+#include <span>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "function/table/scan_replacement.h"
+#include "function/table/table_function.h"
+#include <napi.h>
+
+namespace lbug {
+namespace main {
+class Connection;
+}
+namespace common {
+class LogicalType;
+}
+}
+
+struct NodeStreamChunkRequest {
+    std::mutex mtx;
+    std::condition_variable cv;
+    std::vector<std::vector<lbug::common::Value>> rows;
+    std::vector<std::string> columnNames; // schema order for object rows
+    bool done = false;
+    bool filled = false;
+};
+
+struct NodeStreamSourceState {
+    Napi::ThreadSafeFunction getChunkTsf;
+    std::vector<std::string> columnNames;
+    std::vector<lbug::common::LogicalType> columnTypes;
+};
+
+class NodeStreamRegistry {
+public:
+    void registerSource(const std::string& name, Napi::ThreadSafeFunction tsf,
+        std::vector<std::string> columnNames,
+        std::vector<lbug::common::LogicalType> columnTypes);
+    void unregisterSource(const std::string& name);
+    std::vector<uintptr_t> lookup(const std::string& name) const;
+    std::unique_ptr<lbug::function::ScanReplacementData> replace(
+        std::span<const uintptr_t> handles) const;
+
+    static NodeStreamChunkRequest* getChunkRequest(uint64_t requestId);
+    static void setChunkRequest(uint64_t requestId, std::unique_ptr<NodeStreamChunkRequest> req);
+    static uint64_t nextRequestId();
+
+private:
+    mutable std::mutex mtx_;
+    std::unordered_map<std::string, std::shared_ptr<NodeStreamSourceState>> sources_;
+};
+
+void addNodeScanReplacement(lbug::main::Connection* connection, NodeStreamRegistry* registry);
+
+void returnChunkFromJS(uint64_t requestId, Napi::Array rowsNapi, bool done);
diff --git a/tools/nodejs_api/src_cpp/include/node_stream_scan.h b/tools/nodejs_api/src_cpp/include/node_stream_scan.h
new file mode 100644
index 0000000000..5c7328b6be
--- /dev/null
+++ b/tools/nodejs_api/src_cpp/include/node_stream_scan.h
@@ -0,0 +1,29 @@
+#pragma once
+
+#include "function/table/bind_data.h"
+#include "function/table/table_function.h"
+
+struct NodeStreamSourceState; // defined in node_scan_replacement.h
+
+struct NodeStreamScanFunctionData :
lbug::function::TableFuncBindData { + std::shared_ptr state; + + NodeStreamScanFunctionData(lbug::binder::expression_vector columns, + std::shared_ptr state) + : TableFuncBindData(std::move(columns), 0), state(std::move(state)) {} + + std::unique_ptr copy() const override { + return std::make_unique(columns, state); + } +}; + +namespace lbug { +namespace function { + +struct NodeStreamScanFunction { + static constexpr const char* name = "NODE_STREAM_SCAN"; + static TableFunction getFunction(); +}; + +} // namespace function +} // namespace lbug diff --git a/tools/nodejs_api/src_cpp/include/node_util.h b/tools/nodejs_api/src_cpp/include/node_util.h index 4ea7a50674..cd27b21371 100644 --- a/tools/nodejs_api/src_cpp/include/node_util.h +++ b/tools/nodejs_api/src_cpp/include/node_util.h @@ -10,10 +10,10 @@ class Util { static Napi::Value ConvertToNapiObject(const Value& value, Napi::Env env); static std::unordered_map> TransformParametersForExec( Napi::Array params); + static Value TransformNapiValue(Napi::Value napiValue); private: static Napi::Object ConvertNodeIdToNapiObject(const nodeID_t& nodeId, Napi::Env env); - static Value TransformNapiValue(Napi::Value napiValue); const static int64_t JS_MAX_SAFE_INTEGER = 9007199254740991; const static int64_t JS_MIN_SAFE_INTEGER = -9007199254740991; }; diff --git a/tools/nodejs_api/src_cpp/node_connection.cpp b/tools/nodejs_api/src_cpp/node_connection.cpp index 404903904f..6ec6b0e65a 100644 --- a/tools/nodejs_api/src_cpp/node_connection.cpp +++ b/tools/nodejs_api/src_cpp/node_connection.cpp @@ -1,11 +1,14 @@ #include "include/node_connection.h" +#include #include #include "include/node_database.h" #include "include/node_query_result.h" +#include "include/node_scan_replacement.h" #include "include/node_util.h" #include "main/lbug.h" +#include "main/storage_driver.h" Napi::Object NodeConnection::Init(Napi::Env env, Napi::Object exports) { Napi::HandleScope scope(env); @@ -19,7 +22,13 @@ Napi::Object NodeConnection::Init(Napi::Env env, Napi::Object exports) { InstanceMethod("querySync", &NodeConnection::QuerySync), InstanceMethod("setMaxNumThreadForExec", &NodeConnection::SetMaxNumThreadForExec), InstanceMethod("setQueryTimeout", &NodeConnection::SetQueryTimeout), - InstanceMethod("close", &NodeConnection::Close)}); + InstanceMethod("interrupt", &NodeConnection::Interrupt), + InstanceMethod("close", &NodeConnection::Close), + InstanceMethod("registerStream", &NodeConnection::RegisterStream), + InstanceMethod("unregisterStream", &NodeConnection::UnregisterStream), + InstanceMethod("returnChunk", &NodeConnection::ReturnChunk), + InstanceMethod("getNumNodes", &NodeConnection::GetNumNodes), + InstanceMethod("getNumRels", &NodeConnection::GetNumRels)}); exports.Set("NodeConnection", t); return exports; @@ -57,6 +66,8 @@ void NodeConnection::InitCppConnection() { this->connection = std::make_shared(database.get()); ProgressBar::Get(*connection->getClientContext()) ->setDisplay(std::make_shared()); + streamRegistry = std::make_unique(); + addNodeScanReplacement(connection.get(), streamRegistry.get()); // After the connection is initialized, we do not need to hold a reference to the database. 
database.reset(); } @@ -83,9 +94,16 @@ void NodeConnection::SetQueryTimeout(const Napi::CallbackInfo& info) { } } +void NodeConnection::Interrupt(const Napi::CallbackInfo& /* info */) { + if (this->connection) { + this->connection->interrupt(); + } +} + void NodeConnection::Close(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); Napi::HandleScope scope(env); + streamRegistry.reset(); this->connection.reset(); } @@ -157,3 +175,139 @@ Napi::Value NodeConnection::QueryAsync(const Napi::CallbackInfo& info) { asyncWorker->Queue(); return info.Env().Undefined(); } + +static lbug::common::LogicalType parseColumnType(const std::string& typeStr) { + std::string upper = typeStr; + std::transform(upper.begin(), upper.end(), upper.begin(), ::toupper); + if (upper == "INT64") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT64); + if (upper == "INT32") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT32); + if (upper == "INT16") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT16); + if (upper == "INT8") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT8); + if (upper == "UINT64") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT64); + if (upper == "UINT32") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT32); + if (upper == "UINT16") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT16); + if (upper == "UINT8") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT8); + if (upper == "DOUBLE") return lbug::common::LogicalType(lbug::common::LogicalTypeID::DOUBLE); + if (upper == "FLOAT") return lbug::common::LogicalType(lbug::common::LogicalTypeID::FLOAT); + if (upper == "STRING") return lbug::common::LogicalType(lbug::common::LogicalTypeID::STRING); + if (upper == "BOOL" || upper == "BOOLEAN") + return lbug::common::LogicalType(lbug::common::LogicalTypeID::BOOL); + if (upper == "DATE") return lbug::common::LogicalType(lbug::common::LogicalTypeID::DATE); + if (upper == "TIMESTAMP") + return lbug::common::LogicalType(lbug::common::LogicalTypeID::TIMESTAMP); + throw std::runtime_error("Unsupported column type for registerStream: " + typeStr); +} + +Napi::Value NodeConnection::RegisterStream(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + if (!connection || !streamRegistry) { + Napi::Error::New(env, "Connection not initialized.").ThrowAsJavaScriptException(); + return env.Undefined(); + } + if (info.Length() < 3 || !info[0].IsString() || !info[1].IsFunction() || !info[2].IsArray()) { + Napi::Error::New(env, + "registerStream(name, getChunkCallback, columns): name string, getChunkCallback " + "function(requestId), columns array of { name, type }.") + .ThrowAsJavaScriptException(); + return env.Undefined(); + } + std::string name = info[0].As().Utf8Value(); + Napi::Function getChunkCallback = info[1].As(); + Napi::Array columnsArr = info[2].As(); + std::vector columnNames; + std::vector columnTypes; + for (uint32_t i = 0; i < columnsArr.Length(); i++) { + Napi::Value col = columnsArr.Get(i); + if (!col.IsObject()) continue; + Napi::Object obj = col.As(); + if (!obj.Get("name").IsString() || !obj.Get("type").IsString()) continue; + columnNames.push_back(obj.Get("name").As().Utf8Value()); + columnTypes.push_back( + parseColumnType(obj.Get("type").As().Utf8Value())); + } + if (columnNames.empty()) { + Napi::Error::New(env, "registerStream: at least one column required.").ThrowAsJavaScriptException(); + return env.Undefined(); 
+ } + try { + auto tsf = Napi::ThreadSafeFunction::New( + env, getChunkCallback, "NodeStreamGetChunk", 0, 1); + streamRegistry->registerSource(name, std::move(tsf), std::move(columnNames), + std::move(columnTypes)); + } catch (const std::exception& exc) { + Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException(); + } + return env.Undefined(); +} + +void NodeConnection::UnregisterStream(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + if (!streamRegistry) return; + if (info.Length() < 1 || !info[0].IsString()) { + Napi::Error::New(env, "unregisterStream(name): name string.").ThrowAsJavaScriptException(); + return; + } + streamRegistry->unregisterSource(info[0].As().Utf8Value()); +} + +void NodeConnection::ReturnChunk(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + if (info.Length() < 3 || !info[0].IsNumber() || !info[1].IsArray() || !info[2].IsBoolean()) { + Napi::Error::New(env, + "returnChunk(requestId, rows, done): requestId number, rows array of rows, done boolean.") + .ThrowAsJavaScriptException(); + return; + } + uint64_t requestId = static_cast(info[0].ToNumber().Int64Value()); + Napi::Array rows = info[1].As(); + bool done = info[2].ToBoolean().Value(); + returnChunkFromJS(requestId, rows, done); +} + +Napi::Value NodeConnection::GetNumNodes(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + if (!connection) { + Napi::Error::New(env, "Connection not initialized.").ThrowAsJavaScriptException(); + return env.Undefined(); + } + if (info.Length() < 1 || !info[0].IsString()) { + Napi::Error::New(env, "getNumNodes(nodeName): nodeName string required.").ThrowAsJavaScriptException(); + return env.Undefined(); + } + try { + Database* db = connection->getClientContext()->getDatabase(); + StorageDriver storageDriver(db); + std::string nodeName = info[0].As().Utf8Value(); + uint64_t count = storageDriver.getNumNodes(nodeName); + return Napi::Number::New(env, static_cast(count)); + } catch (const std::exception& exc) { + Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException(); + } + return env.Undefined(); +} + +Napi::Value NodeConnection::GetNumRels(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + if (!connection) { + Napi::Error::New(env, "Connection not initialized.").ThrowAsJavaScriptException(); + return env.Undefined(); + } + if (info.Length() < 1 || !info[0].IsString()) { + Napi::Error::New(env, "getNumRels(relName): relName string required.").ThrowAsJavaScriptException(); + return env.Undefined(); + } + try { + Database* db = connection->getClientContext()->getDatabase(); + StorageDriver storageDriver(db); + std::string relName = info[0].As().Utf8Value(); + uint64_t count = storageDriver.getNumRels(relName); + return Napi::Number::New(env, static_cast(count)); + } catch (const std::exception& exc) { + Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException(); + } + return env.Undefined(); +} diff --git a/tools/nodejs_api/src_cpp/node_progress_bar_display.cpp b/tools/nodejs_api/src_cpp/node_progress_bar_display.cpp index 352775624a..a76dbd953e 100644 --- a/tools/nodejs_api/src_cpp/node_progress_bar_display.cpp +++ b/tools/nodejs_api/src_cpp/node_progress_bar_display.cpp @@ -3,6 +3,14 @@ using namespace lbug; using namespace common; +NodeProgressBarDisplay::~NodeProgressBarDisplay() { + std::unique_lock lock(callbackMutex); + for (auto& kv : queryCallbacks) { + 
kv.second.Release();
+    }
+    queryCallbacks.clear();
+}
+
 void NodeProgressBarDisplay::updateProgress(uint64_t queryID, double newPipelineProgress,
     uint32_t newNumPipelinesFinished) {
     if (numPipelines == 0) {
diff --git a/tools/nodejs_api/src_cpp/node_query_result.cpp b/tools/nodejs_api/src_cpp/node_query_result.cpp
index 24c18222ac..d202b705d4 100644
--- a/tools/nodejs_api/src_cpp/node_query_result.cpp
+++ b/tools/nodejs_api/src_cpp/node_query_result.cpp
@@ -25,6 +25,7 @@ Napi::Object NodeQueryResult::Init(Napi::Env env, Napi::Object exports) {
             InstanceMethod("getColumnNamesSync", &NodeQueryResult::GetColumnNamesSync),
             InstanceMethod("getQuerySummaryAsync", &NodeQueryResult::GetQuerySummaryAsync),
             InstanceMethod("getQuerySummarySync", &NodeQueryResult::GetQuerySummarySync),
+            InstanceMethod("toStringSync", &NodeQueryResult::GetToStringSync),
             InstanceMethod("close", &NodeQueryResult::Close)});

     exports.Set("NodeQueryResult", t);
@@ -228,6 +229,17 @@ Napi::Value NodeQueryResult::GetQuerySummarySync(const Napi::CallbackInfo& info)
     return env.Undefined();
 }

+Napi::Value NodeQueryResult::GetToStringSync(const Napi::CallbackInfo& info) {
+    Napi::Env env = info.Env();
+    Napi::HandleScope scope(env);
+    try {
+        return Napi::String::New(env, this->queryResult->toString());
+    } catch (const std::exception& exc) {
+        Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException();
+    }
+    return env.Undefined();
+}
+
 void NodeQueryResult::Close(const Napi::CallbackInfo& info) {
     Napi::Env env = info.Env();
     Napi::HandleScope scope(env);
diff --git a/tools/nodejs_api/src_cpp/node_scan_replacement.cpp b/tools/nodejs_api/src_cpp/node_scan_replacement.cpp
new file mode 100644
index 0000000000..8fdb757f46
--- /dev/null
+++ b/tools/nodejs_api/src_cpp/node_scan_replacement.cpp
@@ -0,0 +1,134 @@
+#include "include/node_scan_replacement.h"
+#include "include/node_stream_scan.h"
+#include "include/node_util.h"
+
+#include "function/table/bind_input.h"
+#include "main/client_context.h"
+#include "main/connection.h"
+
+#include <atomic>
+
+using namespace lbug::common;
+using namespace lbug::function;
+using namespace lbug::main;
+
+namespace {
+
+std::mutex g_requestMutex;
+std::atomic<uint64_t> g_nextRequestId{1};
+std::unordered_map<uint64_t, std::unique_ptr<NodeStreamChunkRequest>> g_chunkRequests;
+
+} // namespace
+
+void NodeStreamRegistry::registerSource(const std::string& name, Napi::ThreadSafeFunction tsf,
+    std::vector<std::string> columnNames, std::vector<LogicalType> columnTypes) {
+    std::lock_guard<std::mutex> lock(mtx_);
+    auto state = std::make_shared<NodeStreamSourceState>();
+    state->getChunkTsf = std::move(tsf);
+    state->columnNames = std::move(columnNames);
+    state->columnTypes = std::move(columnTypes);
+    sources_[name] = std::move(state);
+}
+
+void NodeStreamRegistry::unregisterSource(const std::string& name) {
+    std::lock_guard<std::mutex> lock(mtx_);
+    sources_.erase(name);
+}
+
+std::vector<uintptr_t> NodeStreamRegistry::lookup(const std::string& name) const {
+    std::lock_guard<std::mutex> lock(mtx_);
+    auto it = sources_.find(name);
+    if (it == sources_.end()) {
+        return {};
+    }
+    return {reinterpret_cast<uintptr_t>(it->second.get())};
+}
+
+std::unique_ptr<ScanReplacementData> NodeStreamRegistry::replace(
+    std::span<const uintptr_t> handles) const {
+    if (handles.empty()) {
+        return nullptr;
+    }
+    auto* statePtr = reinterpret_cast<NodeStreamSourceState*>(handles[0]);
+    auto data = std::make_unique<ScanReplacementData>();
+    data->func = NodeStreamScanFunction::getFunction();
+    data->bindInput.addLiteralParam(Value::createValue(reinterpret_cast<uint64_t>(statePtr)));
+    return data;
+}
+
+NodeStreamChunkRequest* NodeStreamRegistry::getChunkRequest(uint64_t requestId) {
+    std::lock_guard<std::mutex> lock(g_requestMutex);
+    auto it = g_chunkRequests.find(requestId);
+    return it != g_chunkRequests.end() ? it->second.get() : nullptr;
+}
+
+void NodeStreamRegistry::setChunkRequest(uint64_t requestId,
+    std::unique_ptr<NodeStreamChunkRequest> req) {
+    std::lock_guard<std::mutex> lock(g_requestMutex);
+    if (req) {
+        g_chunkRequests[requestId] = std::move(req);
+    } else {
+        g_chunkRequests.erase(requestId);
+    }
+}
+
+uint64_t NodeStreamRegistry::nextRequestId() {
+    return g_nextRequestId++;
+}
+
+static std::vector<uintptr_t> lookupNodeStream(const std::string& objectName,
+    void* registryVoid) {
+    auto* registry = static_cast<NodeStreamRegistry*>(registryVoid);
+    return registry->lookup(objectName);
+}
+
+static std::unique_ptr<ScanReplacementData> replaceNodeStream(
+    std::span<const uintptr_t> handles, void* registryVoid) {
+    auto* registry = static_cast<NodeStreamRegistry*>(registryVoid);
+    return registry->replace(handles);
+}
+
+void addNodeScanReplacement(Connection* connection, NodeStreamRegistry* registry) {
+    auto lookup = [registry](const std::string& name) {
+        return lookupNodeStream(name, registry);
+    };
+    auto replace = [registry](std::span<const uintptr_t> handles) {
+        return replaceNodeStream(handles, registry);
+    };
+    connection->getClientContext()->addScanReplace(ScanReplacement(std::move(lookup), replace));
+}
+
+void returnChunkFromJS(uint64_t requestId, Napi::Array rowsNapi, bool done) {
+    auto* req = NodeStreamRegistry::getChunkRequest(requestId);
+    if (!req) {
+        return;
+    }
+    std::vector<std::vector<Value>> rows;
+    const size_t numRows = rowsNapi.Length();
+    rows.reserve(numRows);
+    for (size_t r = 0; r < numRows; r++) {
+        Napi::Value rowVal = rowsNapi.Get(r);
+        std::vector<Value> row;
+        if (rowVal.IsArray()) {
+            auto arr = rowVal.As<Napi::Array>();
+            for (size_t c = 0; c < arr.Length(); c++) {
+                row.push_back(Util::TransformNapiValue(arr.Get(c)));
+            }
+        } else if (rowVal.IsObject() && !rowVal.IsNull() && !rowVal.IsUndefined()) {
+            auto obj = rowVal.As<Napi::Object>();
+            const auto& colNames = req->columnNames;
+            for (const auto& colName : colNames) {
+                row.push_back(Util::TransformNapiValue(obj.Get(colName)));
+            }
+        }
+        rows.push_back(std::move(row));
+    }
+    {
+        std::lock_guard<std::mutex> lock(req->mtx);
+        req->rows = std::move(rows);
+        req->done = done;
+        req->filled = true;
+    }
+    req->cv.notify_one();
+}
diff --git a/tools/nodejs_api/src_cpp/node_stream_scan.cpp b/tools/nodejs_api/src_cpp/node_stream_scan.cpp
new file mode 100644
index 0000000000..529c10b91d
--- /dev/null
+++ b/tools/nodejs_api/src_cpp/node_stream_scan.cpp
@@ -0,0 +1,103 @@
+#include "include/node_stream_scan.h"
+#include "include/node_scan_replacement.h"
+
+#include "binder/binder.h"
+#include "common/constants.h"
+#include "common/system_config.h"
+#include "function/table/bind_input.h"
+#include "processor/execution_context.h"
+#include "processor/result/factorized_table.h"
+
+#include <mutex>
+
+using namespace lbug::common;
+using namespace lbug::function;
+
+namespace lbug {
+
+namespace {
+std::mutex g_nodeStreamTableFuncMutex;
+}
+
+static std::unique_ptr<TableFuncBindData> bindFunc(lbug::main::ClientContext*,
+    const TableFuncBindInput* input) {
+    auto* statePtr = reinterpret_cast<NodeStreamSourceState*>(input->getLiteralVal<uint64_t>(0));
+    KU_ASSERT(statePtr != nullptr);
+    std::shared_ptr<NodeStreamSourceState> state(statePtr, [](NodeStreamSourceState*) {});
+    auto columns = input->binder->createVariables(state->columnNames, state->columnTypes);
+    return std::make_unique<NodeStreamScanFunctionData>(std::move(columns), state);
+}
+
+static std::unique_ptr<TableFuncSharedState> initSharedState(
+    const TableFuncInitSharedStateInput&) {
+    return std::make_unique<TableFuncSharedState>(0);
+}
+
+static std::unique_ptr<TableFuncLocalState> initLocalState(
+    const TableFuncInitLocalStateInput&) {
+    return std::make_unique<TableFuncLocalState>();
+}
+
+static offset_t
tableFunc(const TableFuncInput& input, TableFuncOutput& output) { + auto* bindData = input.bindData->constPtrCast(); + auto& state = *bindData->state; + std::unique_lock streamLock(g_nodeStreamTableFuncMutex); + const uint64_t requestId = NodeStreamRegistry::nextRequestId(); + auto req = std::make_unique(); + req->columnNames = state.columnNames; + NodeStreamRegistry::setChunkRequest(requestId, std::move(req)); + NodeStreamChunkRequest* reqPtr = NodeStreamRegistry::getChunkRequest(requestId); + KU_ASSERT(reqPtr != nullptr); + + state.getChunkTsf.BlockingCall(&requestId, + [](Napi::Env env, Napi::Function jsCallback, const uint64_t* idPtr) { + jsCallback.Call({Napi::Number::New(env, static_cast(*idPtr))}); + }); + + std::vector> rowsCopy; + { + std::unique_lock lock(reqPtr->mtx); + reqPtr->cv.wait(lock, [reqPtr] { return reqPtr->filled; }); + rowsCopy = std::move(reqPtr->rows); + } + NodeStreamRegistry::setChunkRequest(requestId, nullptr); + streamLock.unlock(); + + const offset_t numRows = static_cast(rowsCopy.size()); + if (numRows == 0) { + return 0; + } + + const auto numCols = bindData->getNumColumns(); + const auto cap = std::min(numRows, static_cast(DEFAULT_VECTOR_CAPACITY)); + for (offset_t r = 0; r < cap; r++) { + for (auto c = 0u; c < numCols; c++) { + auto& vec = output.dataChunk.getValueVectorMutable(c); + if (c < rowsCopy[r].size() && !rowsCopy[r][c].isNull()) { + vec.setNull(r, false); + vec.copyFromValue(r, rowsCopy[r][c]); + } else { + vec.setNull(r, true); + } + } + } + output.dataChunk.state->getSelVectorUnsafe().setSelSize(cap); + return cap; +} + +static double progressFunc(TableFuncSharedState*) { + return 0.0; +} + +TableFunction NodeStreamScanFunction::getFunction() { + TableFunction func(name, std::vector{LogicalTypeID::POINTER}); + func.tableFunc = tableFunc; + func.bindFunc = bindFunc; + func.initSharedStateFunc = initSharedState; + func.initLocalStateFunc = initLocalState; + func.progressFunc = progressFunc; + func.canParallelFunc = [] { return false; }; + return func; +} + +} // namespace lbug diff --git a/tools/nodejs_api/src_js/connection.js b/tools/nodejs_api/src_js/connection.js index 575d9f27ff..d89e8f58de 100644 --- a/tools/nodejs_api/src_js/connection.js +++ b/tools/nodejs_api/src_js/connection.js @@ -121,13 +121,20 @@ class Connection { * Execute a prepared statement with the given parameters. * @param {lbug.PreparedStatement} preparedStatement the prepared statement to execute. * @param {Object} params a plain object mapping parameter names to values. - * @param {Function} [progressCallback] - Optional callback function that is invoked with the progress of the query execution. The callback receives three arguments: pipelineProgress, numPipelinesFinished, and numPipelines. - * @returns {Promise} a promise that resolves to the query result. The promise is rejected if there is an error. + * @param {Object|Function} [optionsOrProgressCallback] - Options { signal?: AbortSignal, progressCallback?: Function } or legacy progress callback. + * @returns {Promise} a promise that resolves to the query result. Rejects if error or options.signal is aborted. 
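+   * @example
+   * // Sketch (assumes conn and a prepared statement stmt): cancel via AbortController.
+   * const ac = new AbortController();
+   * const pending = conn.execute(stmt, { id: 1 }, { signal: ac.signal });
+   * setTimeout(() => ac.abort(), 1000);
+   * pending.catch((e) => { if (e.name === "AbortError") console.log("cancelled"); });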
*/ - execute(preparedStatement, params = {}, progressCallback) { + execute(preparedStatement, params = {}, optionsOrProgressCallback) { + const { signal, progressCallback } = this._normalizeQueryOptions(optionsOrProgressCallback); return new Promise((resolve, reject) => { + if (progressCallback !== undefined && typeof progressCallback !== "function") { + return reject(new Error("progressCallback must be a function.")); + } + if (optionsOrProgressCallback != null && typeof optionsOrProgressCallback !== "function" && typeof optionsOrProgressCallback !== "object") { + return reject(new Error("progressCallback must be a function.")); + } if ( - !typeof preparedStatement === "object" || + typeof preparedStatement !== "object" || preparedStatement.constructor.name !== "PreparedStatement" ) { return reject( @@ -147,11 +154,29 @@ class Connection { const value = params[key]; paramArray.push([key, value]); } - if (progressCallback && typeof progressCallback !== "function") { - return reject(new Error("progressCallback must be a function.")); + if (signal?.aborted) { + return reject(this._createAbortError()); + } + let abortListener; + const cleanup = () => { + if (signal && abortListener) { + signal.removeEventListener("abort", abortListener); + } + }; + if (signal) { + abortListener = () => { + this.interrupt(); + cleanup(); + reject(this._createAbortError()); + }; + signal.addEventListener("abort", abortListener); } this._getConnection() .then((connection) => { + if (signal?.aborted) { + cleanup(); + return reject(this._createAbortError()); + } const nodeQueryResult = new LbugNative.NodeQueryResult(); try { connection.executeAsync( @@ -159,7 +184,11 @@ class Connection { nodeQueryResult, paramArray, (err) => { + cleanup(); if (err) { + if (signal?.aborted && err.message === "Interrupted.") { + return reject(this._createAbortError()); + } return reject(err); } this._unwrapMultipleQueryResults(nodeQueryResult) @@ -173,10 +202,12 @@ class Connection { progressCallback ); } catch (e) { + cleanup(); return reject(e); } }) .catch((err) => { + cleanup(); return reject(err); }); }); @@ -261,26 +292,65 @@ class Connection { return new PreparedStatement(this, preparedStatement); } + /** + * Interrupt the currently executing query on this connection. + * No-op if the connection is not initialized or no query is running. + */ + interrupt() { + if (this._connection) { + this._connection.interrupt(); + } + } + /** * Execute a query. * @param {String} statement the statement to execute. - * @param {Function} [progressCallback] - Optional callback function that is invoked with the progress of the query execution. The callback receives three arguments: pipelineProgress, numPipelinesFinished, and numPipelines. - * @returns {Promise} a promise that resolves to the query result. The promise is rejected if there is an error. + * @param {Object|Function} [optionsOrProgressCallback] - Options object { signal?: AbortSignal, progressCallback?: Function } or legacy progress callback. + * @returns {Promise} a promise that resolves to the query result. The promise is rejected if there is an error or if options.signal is aborted. 
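+   * @example
+   * // Sketch: the legacy progress-callback form is still accepted.
+   * await conn.query("MATCH (a) RETURN count(*)", (pipelineProgress, numDone, numTotal) => {
+   *   console.log(`progress: ${pipelineProgress} (${numDone}/${numTotal} pipelines)`);
+   * });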
*/ - query(statement, progressCallback) { + query(statement, optionsOrProgressCallback) { + const { signal, progressCallback } = this._normalizeQueryOptions(optionsOrProgressCallback); return new Promise((resolve, reject) => { + if (progressCallback !== undefined && typeof progressCallback !== "function") { + return reject(new Error("progressCallback must be a function.")); + } + if (optionsOrProgressCallback != null && typeof optionsOrProgressCallback !== "function" && typeof optionsOrProgressCallback !== "object") { + return reject(new Error("progressCallback must be a function.")); + } if (typeof statement !== "string") { return reject(new Error("statement must be a string.")); } - if (progressCallback && typeof progressCallback !== "function") { - return reject(new Error("progressCallback must be a function.")); + if (signal?.aborted) { + return reject(this._createAbortError()); + } + let abortListener; + const cleanup = () => { + if (signal && abortListener) { + signal.removeEventListener("abort", abortListener); + } + }; + if (signal) { + abortListener = () => { + this.interrupt(); + cleanup(); + reject(this._createAbortError()); + }; + signal.addEventListener("abort", abortListener); } this._getConnection() .then((connection) => { + if (signal?.aborted) { + cleanup(); + return reject(this._createAbortError()); + } const nodeQueryResult = new LbugNative.NodeQueryResult(); try { connection.queryAsync(statement, nodeQueryResult, (err) => { + cleanup(); if (err) { + if (signal?.aborted && err.message === "Interrupted.") { + return reject(this._createAbortError()); + } return reject(err); } this._unwrapMultipleQueryResults(nodeQueryResult) @@ -293,15 +363,176 @@ class Connection { }, progressCallback); } catch (e) { + cleanup(); return reject(e); } }) .catch((err) => { + cleanup(); return reject(err); }); }); } + _normalizeQueryOptions(optionsOrProgressCallback) { + if (optionsOrProgressCallback == null) { + return { signal: undefined, progressCallback: undefined }; + } + if (typeof optionsOrProgressCallback === "function") { + return { signal: undefined, progressCallback: optionsOrProgressCallback }; + } + if (typeof optionsOrProgressCallback === "object" && optionsOrProgressCallback !== null) { + return { + signal: optionsOrProgressCallback.signal, + progressCallback: optionsOrProgressCallback.progressCallback, + }; + } + return { signal: undefined, progressCallback: undefined }; + } + + _createAbortError() { + return new DOMException("The operation was aborted.", "AbortError"); + } + + /** + * Check that the connection is alive (e.g. for connection pools or health checks). + * Runs a trivial query; rejects if the connection is broken. + * @returns {Promise} resolves to true if the connection is OK. + */ + async ping() { + const result = await this.query("RETURN 1"); + const closeResult = (r) => { + if (Array.isArray(r)) { + r.forEach((q) => q.close()); + } else { + r.close(); + } + }; + closeResult(result); + return true; + } + + /** + * Run EXPLAIN on a Cypher statement and return the plan as a string. + * @param {string} statement – Cypher statement (e.g. "MATCH (a:person) RETURN a") + * @returns {Promise} the plan string (one row per line) + */ + async explain(statement) { + if (typeof statement !== "string") { + throw new Error("explain: statement must be a string."); + } + const trimmed = statement.trim(); + const explainStatement = trimmed.toUpperCase().startsWith("EXPLAIN") ? 
trimmed : "EXPLAIN " + trimmed; + const result = await this.query(explainStatement); + const single = Array.isArray(result) ? result[0] : result; + const rows = await single.getAll(); + single.close(); + if (rows.length === 0) { + return ""; + } + return rows + .map((row) => Object.values(row).join(" | ")) + .join("\n"); + } + + /** + * Get the number of nodes in a node table. Connection must be initialized. + * @param {string} nodeName – name of the node table (e.g. "User") + * @returns {number} count of nodes + */ + getNumNodes(nodeName) { + if (typeof nodeName !== "string") { + throw new Error("getNumNodes(nodeName): nodeName must be a string."); + } + const connection = this._getConnectionSync(); + return connection.getNumNodes(nodeName); + } + + /** + * Get the number of relationships in a rel table. Connection must be initialized. + * @param {string} relName – name of the rel table (e.g. "Follows") + * @returns {number} count of relationships + */ + getNumRels(relName) { + if (typeof relName !== "string") { + throw new Error("getNumRels(relName): relName must be a string."); + } + const connection = this._getConnectionSync(); + return connection.getNumRels(relName); + } + + /** + * Register a stream source for LOAD FROM name. The source must be AsyncIterable; each yielded + * value is a row (array of column values in schema order, or object keyed by column name). + * Call unregisterStream(name) when done or before reusing the name. + * @param {string} name – name used in Cypher: LOAD FROM name RETURN ... + * @param {AsyncIterable|Object>} source – async iterable of rows + * @param {{ columns: Array<{ name: string, type: string }> }} options – schema (required). type: INT64, INT32, DOUBLE, STRING, BOOL, DATE, etc. + */ + async registerStream(name, source, options = {}) { + if (typeof name !== "string") { + throw new Error("registerStream: name must be a string."); + } + const columns = options.columns; + if (!Array.isArray(columns) || columns.length === 0) { + throw new Error("registerStream: options.columns (array of { name, type }) is required."); + } + const conn = await this._getConnection(); + const it = source[Symbol.asyncIterator] ? source[Symbol.asyncIterator].call(source) : source; + const pending = []; + let consumerRunning = false; + + const toRows = (raw) => { + if (raw == null) return []; + if (Array.isArray(raw)) { + const first = raw[0]; + const isArrayOfRows = + raw.length > 0 && + (Array.isArray(first) || (typeof first === "object" && first !== null && !Array.isArray(first))); + return isArrayOfRows ? raw : [raw]; + } + return [raw]; + }; + + const runConsumer = async () => { + pending.sort((a, b) => a - b); + while (pending.length > 0) { + const requestId = pending.shift(); + try { + const n = await it.next(); + const { rows, done } = { rows: toRows(n.value), done: n.done }; + conn.returnChunk(requestId, rows, done); + } catch (e) { + conn.returnChunk(requestId, [], true); + } + } + consumerRunning = false; + }; + + const getChunk = (requestId) => { + pending.push(requestId); + if (!consumerRunning) { + consumerRunning = true; + setImmediate(() => runConsumer()); + } + }; + conn.registerStream(name, getChunk, columns); + } + + /** + * Unregister a stream source by name. 
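+   * @example
+   * // Sketch: feed rows from an async generator, query them, then free the name.
+   * async function* rows() { yield [1, "a"]; yield [2, "b"]; }
+   * await conn.registerStream("src", rows(), {
+   *   columns: [{ name: "id", type: "INT64" }, { name: "s", type: "STRING" }],
+   * });
+   * const res = await conn.query("LOAD FROM src RETURN *");
+   * conn.unregisterStream("src");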
+ * @param {string} name – name passed to registerStream + */ + unregisterStream(name) { + if (typeof name !== "string") { + throw new Error("unregisterStream: name must be a string."); + } + if (!this._connection) { + return; + } + this._connection.unregisterStream(name); + } + /** * Execute a query synchronously. * @param {String} statement the statement to execute. This function blocks the main thread for the duration of the query, so use it with caution. @@ -396,6 +627,37 @@ class Connection { } } + /** + * Run a function inside a single write transaction. On success commits, on throw rolls back and rethrows. + * Uses Cypher BEGIN TRANSACTION / COMMIT / ROLLBACK under the hood. + * @param {Function} fn async function to run; can use this connection's query/execute inside. + * @returns {Promise<*>} the value returned by fn. + */ + async transaction(fn) { + if (typeof fn !== "function") { + throw new Error("transaction() requires a function."); + } + const closeResult = (r) => { + if (Array.isArray(r)) { + r.forEach((q) => q.close()); + } else { + r.close(); + } + }; + const beginRes = await this.query("BEGIN TRANSACTION"); + closeResult(beginRes); + try { + const result = await fn(); + const commitRes = await this.query("COMMIT"); + closeResult(commitRes); + return result; + } catch (e) { + const rollbackRes = await this.query("ROLLBACK"); + closeResult(rollbackRes); + throw e; + } + } + /** * Set the timeout for queries. Queries that take longer than the timeout * will be aborted. diff --git a/tools/nodejs_api/src_js/database.js b/tools/nodejs_api/src_js/database.js index dc70582494..b7d7a79d6f 100644 --- a/tools/nodejs_api/src_js/database.js +++ b/tools/nodejs_api/src_js/database.js @@ -2,6 +2,29 @@ const LbugNative = require("./lbug_native.js"); +/** Error code when the database file is locked by another process. */ +const LBUG_DATABASE_LOCKED = "LBUG_DATABASE_LOCKED"; + +const LOCK_ERROR_MESSAGE = "Could not set lock on file"; + +function isLockError(err) { + return err && typeof err.message === "string" && err.message.includes(LOCK_ERROR_MESSAGE); +} + +function normalizeInitError(err) { + if (isLockError(err)) { + const e = new Error(err.message); + e.code = LBUG_DATABASE_LOCKED; + e.cause = err; + return e; + } + return err; +} + +function sleep(ms) { + return new Promise((r) => setTimeout(r, ms)); +} + class Database { /** * Initialize a new Database object. Note that the initialization is done @@ -26,6 +49,8 @@ class Database { * the error occured. * @param {Boolean} enableChecksums If true, the database will use checksums to detect corruption in the * WAL file. + * @param {Number} [openLockRetryMs=5000] When the database file is locked, retry opening for up to this many ms + * (grace period). Only applies to async init(); set to 0 to fail immediately. Ignored for in-memory databases. 
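+   * @example
+   * // Sketch: fail fast on a held file lock instead of retrying for 5s
+   * // (arguments are positional, matching the constructor order above).
+   * const db = new Database("app.db", 0, true, false, 0, true, -1, true, true, 0);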
*/ constructor( databasePath, @@ -37,6 +62,7 @@ class Database { checkpointThreshold = -1, throwOnWalReplayFailure = true, enableChecksums = true, + openLockRetryMs = 5000, ) { if (!databasePath) { databasePath = ":memory:"; @@ -53,6 +79,9 @@ class Database { if (typeof checkpointThreshold !== "number" || maxDBSize < -1) { throw new Error("Checkpoint threshold must be a positive integer."); } + if (typeof openLockRetryMs !== "number" || openLockRetryMs < 0) { + throw new Error("openLockRetryMs must be a non-negative number."); + } bufferManagerSize = Math.floor(bufferManagerSize); maxDBSize = Math.floor(maxDBSize); checkpointThreshold = Math.floor(checkpointThreshold); @@ -70,6 +99,8 @@ class Database { this._isInitialized = false; this._initPromise = null; this._isClosed = false; + // Grace period for lock: retry for up to openLockRetryMs (0 = no retry). In-memory has no file lock. + this._openLockRetryMs = databasePath === ":memory:" ? 0 : Math.floor(openLockRetryMs); } /** @@ -91,27 +122,49 @@ class Database { /** * Initialize the database. Calling this function is optional, as the * database is initialized automatically when the first query is executed. + * When the file is locked, init() retries for up to openLockRetryMs (default 5s) before throwing. */ async init() { if (!this._isInitialized) { if (!this._initPromise) { - this._initPromise = new Promise((resolve, reject) => { - this._database.initAsync((err) => { - if (err) { - reject(err); - } else { - try { - this._isInitialized = true; - } catch (e) { - return reject(e); + const self = this; + const tryOnce = () => + new Promise((resolve, reject) => { + self._database.initAsync((err) => { + if (err) reject(err); + else { + self._isInitialized = true; + resolve(); } - resolve(); - } + }); }); - }); + const OPEN_LOCK_DELAY_MS = 200; + + this._initPromise = (async () => { + const start = Date.now(); + for (;;) { + if (self._isClosed) throw new Error("Database is closed."); + try { + await tryOnce(); + return; + } catch (err) { + if (!isLockError(err)) throw normalizeInitError(err); + if ( + self._openLockRetryMs <= 0 || + Date.now() - start >= self._openLockRetryMs + ) { + throw normalizeInitError(err); + } + await sleep(OPEN_LOCK_DELAY_MS); + } + } + })(); + } + try { + await this._initPromise; + } finally { + this._initPromise = null; } - await this._initPromise; - this._initPromise = null; } } @@ -127,7 +180,11 @@ class Database { if (this._isInitialized) { return; } - this._database.initSync(); + try { + this._database.initSync(); + } catch (err) { + throw normalizeInitError(err); + } this._isInitialized = true; } @@ -208,4 +265,6 @@ class Database { } } +Database.LBUG_DATABASE_LOCKED = LBUG_DATABASE_LOCKED; + module.exports = Database; diff --git a/tools/nodejs_api/src_js/index.js b/tools/nodejs_api/src_js/index.js index d7da3f72b5..3a0b35fd1c 100644 --- a/tools/nodejs_api/src_js/index.js +++ b/tools/nodejs_api/src_js/index.js @@ -4,12 +4,16 @@ const Connection = require("./connection.js"); const Database = require("./database.js"); const PreparedStatement = require("./prepared_statement.js"); const QueryResult = require("./query_result.js"); +const { createPool, Pool } = require("./pool.js"); module.exports = { Connection, Database, PreparedStatement, QueryResult, + createPool, + Pool, + LBUG_DATABASE_LOCKED: Database.LBUG_DATABASE_LOCKED, get VERSION() { return Database.getVersion(); }, diff --git a/tools/nodejs_api/src_js/index.mjs b/tools/nodejs_api/src_js/index.mjs index 9293e40683..bd921d7046 100644 --- 
a/tools/nodejs_api/src_js/index.mjs +++ b/tools/nodejs_api/src_js/index.mjs @@ -5,6 +5,9 @@ export const Database = lbug.Database; export const Connection = lbug.Connection; export const PreparedStatement = lbug.PreparedStatement; export const QueryResult = lbug.QueryResult; +export const createPool = lbug.createPool; +export const Pool = lbug.Pool; +export const LBUG_DATABASE_LOCKED = lbug.LBUG_DATABASE_LOCKED; export const VERSION = lbug.VERSION; export const STORAGE_VERSION = lbug.STORAGE_VERSION; export default lbug; diff --git a/tools/nodejs_api/src_js/lbug.d.ts b/tools/nodejs_api/src_js/lbug.d.ts index 977be1b157..b81fea3b06 100644 --- a/tools/nodejs_api/src_js/lbug.d.ts +++ b/tools/nodejs_api/src_js/lbug.d.ts @@ -22,6 +22,15 @@ export type ProgressCallback = ( numPipelines: number ) => void; +/** + * Options for query() and execute(). + * Use signal to cancel the operation via AbortController. + */ +export interface QueryOptions { + signal?: AbortSignal; + progressCallback?: ProgressCallback; +} + /** * Represents a node ID in the graph database. */ @@ -104,6 +113,63 @@ export interface SystemConfig { checkpointThreshold?: number; } +/** + * Options for createPool(). Same shape as Database constructor args (except path). + */ +export interface PoolDatabaseOptions { + bufferManagerSize?: number; + enableCompression?: boolean; + readOnly?: boolean; + maxDBSize?: number; + autoCheckpoint?: boolean; + checkpointThreshold?: number; + throwOnWalReplayFailure?: boolean; + enableChecksums?: boolean; + openLockRetryMs?: number; +} + +/** + * Options for createPool(). + */ +export interface PoolOptions { + /** Database file path (default ":memory:") */ + databasePath?: string; + /** Same shape as Database constructor options (bufferManagerSize, readOnly, etc.) */ + databaseOptions?: PoolDatabaseOptions; + /** Minimum connections to keep (default 0) */ + minSize?: number; + /** Maximum connections in the pool (required) */ + maxSize: number; + /** Max time to wait for acquire in ms (0 = wait forever, default 0) */ + acquireTimeoutMillis?: number; + /** If true, call conn.ping() before handing out (default false) */ + validateOnAcquire?: boolean; +} + +/** + * Connection pool: acquire/release or run(fn). One shared Database, up to maxSize Connection instances. + */ +export interface Pool { + /** Acquire a connection; must call release(conn) when done. Prefer run(fn) to avoid leaks. */ + acquire(): Promise; + /** Return a connection to the pool. */ + release(conn: Connection): void; + /** Run fn(conn); connection is released in finally (on success or throw). */ + run(fn: (conn: Connection) => Promise): Promise; + /** Close pool: reject new/pending acquire, then close all connections and database. */ + close(): Promise; +} + +/** Pool constructor (use createPool() instead of new Pool()). */ +export type PoolConstructor = new (options: PoolOptions) => Pool; + +/** + * Create a connection pool. + * @param options Pool options (maxSize required; databasePath, databaseOptions, minSize, acquireTimeoutMillis, validateOnAcquire optional) + * @returns Pool instance + */ +export function createPool(options: PoolOptions): Pool; + /** * Represents a Lbug database instance. 
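+ * @example
+ * // Sketch: open an in-memory database and run a single-statement query.
+ * const db = new lbug.Database(":memory:");
+ * const conn = new lbug.Connection(db);
+ * const result = await conn.query("RETURN 1 AS one");
+ * console.log(await result.getAll());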
*/ @@ -117,6 +183,9 @@ export class Database { * @param maxDBSize Maximum size of the database in bytes * @param autoCheckpoint Whether to enable automatic checkpoints * @param checkpointThreshold Threshold for automatic checkpoints + * @param throwOnWalReplayFailure If true, WAL replay failures throw; otherwise replay stops at error + * @param enableChecksums If true, use checksums to detect WAL corruption + * @param openLockRetryMs When the file is locked, retry opening for up to this many ms (default 5000). Set 0 to fail immediately. Only for async init(); ignored for :memory: */ constructor( databasePath?: string, @@ -125,12 +194,16 @@ export class Database { readOnly?: boolean, maxDBSize?: number, autoCheckpoint?: boolean, - checkpointThreshold?: number + checkpointThreshold?: number, + throwOnWalReplayFailure?: boolean, + enableChecksums?: boolean, + openLockRetryMs?: number ); /** * Initialize the database. Calling this function is optional, as the * database is initialized automatically when the first query is executed. + * When the file is locked, retries for up to openLockRetryMs (default 5s) before throwing. * @returns Promise that resolves when initialization completes */ init(): Promise; @@ -200,6 +273,12 @@ export class Connection { */ setQueryTimeout(timeoutInMs: number): void; + /** + * Interrupt the currently executing query on this connection. + * No-op if the connection is not initialized or no query is running. + */ + interrupt(): void; + /** * Close the connection. * @returns Promise that resolves when connection is closed @@ -215,13 +294,13 @@ export class Connection { * Execute a prepared statement. * @param preparedStatement The prepared statement to execute * @param params Parameters for the query as a plain object - * @param progressCallback Optional progress callback - * @returns Promise that resolves to the query result(s) + * @param optionsOrProgressCallback Options (e.g. signal for abort) or legacy progress callback + * @returns Promise that resolves to the query result(s). Rejects with DOMException AbortError if signal is aborted. */ execute( preparedStatement: PreparedStatement, params?: Record, - progressCallback?: ProgressCallback + optionsOrProgressCallback?: QueryOptions | ProgressCallback ): Promise; /** @@ -252,20 +331,73 @@ export class Connection { /** * Execute a query. * @param statement The statement to execute - * @param progressCallback Optional progress callback - * @returns Promise that resolves to the query result(s) + * @param optionsOrProgressCallback Options (e.g. signal for abort) or legacy progress callback + * @returns Promise that resolves to the query result(s). Rejects with DOMException AbortError if signal is aborted. */ query( statement: string, - progressCallback?: ProgressCallback + optionsOrProgressCallback?: QueryOptions | ProgressCallback ): Promise; + /** + * Run a function inside a single write transaction. Commits on success, rolls back on throw. + * @param fn Async function that can use this connection's query/execute + * @returns Promise that resolves to the return value of fn + */ + transaction(fn: () => Promise): Promise; + /** * Execute a query synchronously. * @param statement The statement to execute * @returns The query result(s) */ querySync(statement: string): QueryResult | QueryResult[]; + + /** + * Check that the connection is alive (e.g. for pools or health checks). 
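+   * @example
+   * // Sketch: health check before reusing a connection.
+   * await conn.ping();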
+ * @returns Promise that resolves to true if OK, rejects if connection is broken + */ + ping(): Promise; + + /** + * Run EXPLAIN on a Cypher statement and return the plan as a string. + * @param statement Cypher statement (e.g. "MATCH (a:person) RETURN a") + * @returns Promise that resolves to the plan string (one row per line) + */ + explain(statement: string): Promise; + + /** + * Get the number of nodes in a node table. Connection must be initialized. + * @param nodeName Name of the node table (e.g. "User") + * @returns Count of nodes + */ + getNumNodes(nodeName: string): number; + + /** + * Get the number of relationships in a rel table. Connection must be initialized. + * @param relName Name of the rel table (e.g. "Follows") + * @returns Count of relationships + */ + getNumRels(relName: string): number; + + /** + * Register a stream source for LOAD FROM name. Source must be AsyncIterable of rows (array or object). + * Unregister with unregisterStream(name) when done. + * @param name Name used in Cypher: LOAD FROM name RETURN ... + * @param source AsyncIterable of rows (array of column values or object keyed by column name) + * @param options.columns Schema: array of { name: string, type: string } (type: INT64, INT32, DOUBLE, STRING, BOOL, DATE, etc.) + */ + registerStream( + name: string, + source: AsyncIterable>, + options: { columns: Array<{ name: string; type: string }> } + ): Promise; + + /** + * Unregister a stream source by name. + * @param name Name passed to registerStream + */ + unregisterStream(name: string): void; } /** @@ -286,11 +418,25 @@ export class PreparedStatement { getErrorMessage(): string; } +/** + * Query summary with compiling and execution times (milliseconds). + */ +export interface QuerySummary { + compilingTime: number; + executionTime: number; +} + /** * Represents the results of a query execution. * Note: This class is created internally by Connection query methods. + * Supports async iteration: for await (const row of result) { ... } */ -export class QueryResult { +export class QueryResult implements AsyncIterable | null> { + /** + * Async iterator for row-by-row consumption (for await...of). + */ + [Symbol.asyncIterator](): AsyncIterator | null>; + /** * Reset the iterator for reading results. */ @@ -320,6 +466,18 @@ export class QueryResult { */ getNextSync(): Record | null; + /** + * Return the query result as a string (header + rows). For failed queries returns the error message. + * @returns String representation of the result + */ + toString(): string; + + /** + * Return a Node.js Readable stream (object mode) that yields one row per chunk. + * @returns Readable stream of row objects + */ + toStream(): import("stream").Readable; + /** * Iterate through the query result with callback functions. * @param resultCallback Callback function called for each row @@ -378,12 +536,30 @@ export class QueryResult { */ getColumnNamesSync(): string[]; + /** + * Get the query summary (compiling and execution time in milliseconds). + * @returns Promise that resolves to the query summary + */ + getQuerySummary(): Promise; + + /** + * Get the query summary synchronously. + * @returns The query summary + */ + getQuerySummarySync(): QuerySummary; + /** * Close the result set and release resources. */ close(): void; } +/** + * Error code when the database file is locked by another process. + * Use with init() / initSync() or first query: catch and check err.code === LBUG_DATABASE_LOCKED. 
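+ * @example
+ * // Sketch: distinguish lock contention from other init failures
+ * // (retryLater is a placeholder for application logic).
+ * try { await db.init(); } catch (e) {
+ *   if (e.code === LBUG_DATABASE_LOCKED) retryLater(); else throw e;
+ * }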
+ */ +export const LBUG_DATABASE_LOCKED: "LBUG_DATABASE_LOCKED"; + /** * Default export for the Lbug module. */ @@ -392,6 +568,9 @@ declare const lbug: { Connection: typeof Connection; PreparedStatement: typeof PreparedStatement; QueryResult: typeof QueryResult; + createPool: typeof createPool; + Pool: PoolConstructor; + LBUG_DATABASE_LOCKED: typeof LBUG_DATABASE_LOCKED; VERSION: string; STORAGE_VERSION: bigint; }; diff --git a/tools/nodejs_api/src_js/pool.js b/tools/nodejs_api/src_js/pool.js new file mode 100644 index 0000000000..cc0d585113 --- /dev/null +++ b/tools/nodejs_api/src_js/pool.js @@ -0,0 +1,222 @@ +"use strict"; + +const Database = require("./database.js"); +const Connection = require("./connection.js"); + +const DEFAULT_MIN_SIZE = 0; +const DEFAULT_ACQUIRE_TIMEOUT_MILLIS = 0; +const DEFAULT_VALIDATE_ON_ACQUIRE = false; + +function createDatabase(path, databaseOptions) { + const o = databaseOptions || {}; + return new Database( + path, + o.bufferManagerSize ?? 0, + o.enableCompression ?? true, + o.readOnly ?? false, + o.maxDBSize ?? 0, + o.autoCheckpoint ?? true, + o.checkpointThreshold ?? -1, + o.throwOnWalReplayFailure ?? true, + o.enableChecksums ?? true, + o.openLockRetryMs ?? 5000 + ); +} + +class Pool { + constructor(options) { + if (options == null || typeof options !== "object") { + throw new Error("createPool(options): options must be an object."); + } + const path = options.databasePath; + if (path !== undefined && path !== null && path !== "" && typeof path !== "string") { + throw new Error("createPool: databasePath must be a string or empty."); + } + const maxSize = options.maxSize; + if (typeof maxSize !== "number" || maxSize < 1 || !Number.isInteger(maxSize)) { + throw new Error("createPool: maxSize must be a positive integer."); + } + const minSize = options.minSize ?? DEFAULT_MIN_SIZE; + if (typeof minSize !== "number" || minSize < 0 || !Number.isInteger(minSize) || minSize > maxSize) { + throw new Error("createPool: minSize must be a non-negative integer not greater than maxSize."); + } + const acquireTimeoutMillis = options.acquireTimeoutMillis ?? DEFAULT_ACQUIRE_TIMEOUT_MILLIS; + if (typeof acquireTimeoutMillis !== "number" || acquireTimeoutMillis < 0) { + throw new Error("createPool: acquireTimeoutMillis must be a non-negative number."); + } + const validateOnAcquire = options.validateOnAcquire ?? DEFAULT_VALIDATE_ON_ACQUIRE; + + this._databasePath = path == null || path === "" ? ":memory:" : path; + this._databaseOptions = options.databaseOptions || null; + this._maxSize = maxSize; + this._minSize = minSize; + this._acquireTimeoutMillis = acquireTimeoutMillis; + this._validateOnAcquire = Boolean(validateOnAcquire); + + this._database = null; + this._idle = []; + this._allConnections = []; + this._checkedOut = new Set(); + this._waiters = []; + this._closed = false; + } + + _ensureDatabase() { + if (this._database === null) { + this._database = createDatabase(this._databasePath, this._databaseOptions); + } + return this._database; + } + + _createConnection() { + const db = this._ensureDatabase(); + const conn = new Connection(db); + this._allConnections.push(conn); + return conn; + } + + _wakeNextWaiter(conn) { + while (this._waiters.length > 0) { + const w = this._waiters.shift(); + if (w.timer) clearTimeout(w.timer); + this._checkedOut.add(conn); + w.resolve(conn); + return; + } + this._idle.push(conn); + } + + /** + * Acquire a connection from the pool. Must call release(conn) when done (e.g. in finally). 
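+   * Typical pattern: const conn = await pool.acquire(); try { ... } finally { pool.release(conn); }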
+ * Prefer pool.run(fn) to avoid forgetting release. + * @returns {Promise} + */ + acquire() { + if (this._closed) { + return Promise.reject(new Error("Pool is closed.")); + } + + while (this._allConnections.length < this._minSize) { + this._idle.push(this._createConnection()); + } + if (this._idle.length > 0) { + const conn = this._idle.shift(); + this._checkedOut.add(conn); + if (this._validateOnAcquire) { + return conn.ping().then(() => conn); + } + return Promise.resolve(conn); + } + if (this._allConnections.length < this._maxSize) { + const conn = this._createConnection(); + this._checkedOut.add(conn); + if (this._validateOnAcquire) { + return conn.ping().then(() => conn); + } + return Promise.resolve(conn); + } + + return new Promise((resolve, reject) => { + const entry = { + resolve, + reject, + timer: null, + }; + if (this._acquireTimeoutMillis > 0) { + entry.timer = setTimeout(() => { + const i = this._waiters.indexOf(entry); + if (i !== -1) { + this._waiters.splice(i, 1); + reject(new Error("Pool acquire timed out.")); + } + }, this._acquireTimeoutMillis); + } + this._waiters.push(entry); + }); + } + + /** + * Return a connection to the pool. No-op if pool is closed. + * @param {lbug.Connection} conn + */ + release(conn) { + if (this._closed) { + return; + } + if ( + conn == null || + typeof conn !== "object" || + conn.constructor.name !== "Connection" + ) { + throw new Error("release(conn): conn must be a Connection from this pool."); + } + if (!this._checkedOut.has(conn)) { + throw new Error("release(conn): connection not from this pool or already released."); + } + this._checkedOut.delete(conn); + this._wakeNextWaiter(conn); + } + + /** + * Run a function with a connection; connection is released in finally (on success or throw). + * @template T + * @param {(conn: lbug.Connection) => Promise} fn + * @returns {Promise} + */ + async run(fn) { + if (typeof fn !== "function") { + throw new Error("pool.run(fn): fn must be a function."); + } + const conn = await this.acquire(); + try { + return await fn(conn); + } finally { + this.release(conn); + } + } + + /** + * Close the pool: reject new and pending acquire, then close all connections and the database. + * @returns {Promise} + */ + async close() { + if (this._closed) { + return; + } + this._closed = true; + const err = new Error("Pool is closed."); + for (const w of this._waiters) { + if (w.timer) clearTimeout(w.timer); + w.reject(err); + } + this._waiters.length = 0; + this._idle.length = 0; + for (const conn of this._allConnections) { + try { + await conn.close(); + } catch (_) { + // ignore + } + } + this._allConnections.length = 0; + if (this._database) { + try { + await this._database.close(); + } catch (_) { + // ignore + } + this._database = null; + } + } +} + +/** + * Create a connection pool. One shared Database; up to maxSize Connection instances. + * @param {lbug.PoolOptions} options + * @returns {lbug.Pool} + */ +function createPool(options) { + return new Pool(options); +} + +module.exports = { createPool, Pool }; diff --git a/tools/nodejs_api/src_js/query_result.js b/tools/nodejs_api/src_js/query_result.js index 944f23db9c..24cfe21cb5 100644 --- a/tools/nodejs_api/src_js/query_result.js +++ b/tools/nodejs_api/src_js/query_result.js @@ -1,6 +1,7 @@ "use strict"; const assert = require("assert"); +const { Readable } = require("stream"); class QueryResult { /** @@ -75,6 +76,15 @@ class QueryResult { return this._queryResult.getNextSync(); } + /** + * Return the query result as a string (header + rows). 
For failed queries, returns the error message.
+   * @returns {string}
+   */
+  toString() {
+    this._checkClosed();
+    return this._queryResult.toStringSync();
+  }
+
   /**
    * Iterate through the query result with callback functions.
    * @param {Function} resultCallback the callback function that is called for each row of the query result.
@@ -96,6 +106,64 @@ class QueryResult {
     });
   }
 
+  /**
+   * Async iterator for consuming the result row-by-row (e.g. `for await (const row of result)`).
+   * Does not materialize the full result in memory.
+   * @returns {AsyncIterator}
+   */
+  [Symbol.asyncIterator]() {
+    const self = this;
+    return {
+      async next() {
+        self._checkClosed();
+        if (!self.hasNext()) {
+          return { done: true };
+        }
+        // An async next() already rejects on throw, so no try/catch is needed
+        // around getNext() here.
+        const value = await self.getNext();
+        if (value === null) {
+          return { done: true };
+        }
+        return { value, done: false };
+      },
+    };
+  }
+
+  /**
+   * Return a Node.js Readable stream (object mode) that yields one row per chunk.
+   * Useful for piping or integrating with stream consumers. Does not require native changes.
+   * @returns {stream.Readable} Readable stream of row objects.
+   */
+  toStream() {
+    const self = this;
+    return new Readable({
+      objectMode: true,
+      read() {
+        if (self._isClosed) {
+          return this.push(null);
+        }
+        if (!self.hasNext()) {
+          return this.push(null);
+        }
+        self.getNext()
+          .then((row) => {
+            if (row !== null && row !== undefined) {
+              this.push(row);
+            }
+            if (!self.hasNext()) {
+              this.push(null);
+            }
+          })
+          .catch((err) => {
+            this.destroy(err);
+          });
+      },
+    });
+  }
+
   /**
    * Get all rows of the query result.
    * @returns {Promise<Array<Object>>} a promise that resolves to all rows of the query result. The promise is rejected if there is an error.
@@ -229,13 +297,16 @@ class QueryResult {
   }
 
   /**
-   * Internal function to check if the query result is closed.
-   * @throws {Error} if the query result is closed.
+   * Internal function to check if the query result or its connection is closed.
+   * @throws {Error} if the query result is closed or the connection is closed.
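+   *
+   * Guarding on the connection as well keeps use-after-close failures as plain
+   * JS errors instead of letting them reach native code (see test_resilience.js).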
*/ _checkClosed() { if (this._isClosed) { throw new Error("Query result is closed."); } + if (this._connection._isClosed) { + throw new Error("Connection is closed."); + } } } diff --git a/tools/nodejs_api/test/common.js b/tools/nodejs_api/test/common.js index ab0a954842..036a2bd393 100644 --- a/tools/nodejs_api/test/common.js +++ b/tools/nodejs_api/test/common.js @@ -1,8 +1,39 @@ -global.chai = require("chai"); -global.assert = chai.assert; -global.expect = chai.expect; -chai.should(); -chai.config.includeStack = true; +const nodeAssert = require("node:assert"); +const fs = require("fs/promises"); +const path = require("path"); +const os = require("os"); + +// Chai-like API on top of node:assert for minimal test changes +const assert = Object.create(nodeAssert); +assert.exists = (val, msg) => nodeAssert.ok(val != null, msg || "expected value to exist"); +assert.notExists = (val, msg) => nodeAssert.ok(val == null, msg || "expected value to not exist"); +assert.isNull = (val, msg) => nodeAssert.strictEqual(val, null, msg); +assert.isNotNull = (val, msg) => nodeAssert.notStrictEqual(val, null, msg); +assert.isTrue = (val, msg) => nodeAssert.strictEqual(val, true, msg); +assert.isFalse = (val, msg) => nodeAssert.strictEqual(val, false, msg); +assert.include = (container, value, msg) => + nodeAssert.ok(container.includes(value), msg || `expected ${container} to include ${value}`); +assert.isEmpty = (val, msg) => + nodeAssert.strictEqual(val.length, 0, msg || `expected empty, got length ${val.length}`); +assert.instanceOf = (obj, Ctor, msg) => + nodeAssert.ok(obj instanceof Ctor, msg || `expected instance of ${Ctor.name}`); +assert.isNumber = (val, msg) => + nodeAssert.strictEqual(typeof val, "number", msg); +assert.isString = (val, msg) => + nodeAssert.strictEqual(typeof val, "string", msg); +assert.isAtLeast = (n, min, msg) => + nodeAssert.ok(n >= min, msg || `expected ${n} >= ${min}`); +assert.lengthOf = (arr, n, msg) => + nodeAssert.strictEqual(arr.length, n, msg || `expected length ${n}, got ${arr.length}`); +assert.equal = (a, b, msg) => nodeAssert.strictEqual(a, b, msg); +assert.notEqual = (a, b, msg) => nodeAssert.notStrictEqual(a, b, msg); +assert.deepEqual = (a, b, msg) => nodeAssert.deepStrictEqual(a, b, msg); +assert.approximately = (actual, expected, delta, msg) => + nodeAssert.ok( + Math.abs(actual - expected) <= delta, + msg || `expected ${actual} to be approximately ${expected} (±${delta})` + ); +global.assert = assert; const TEST_INSTALLED = process.env.TEST_INSTALLED || false; if (TEST_INSTALLED) { @@ -15,19 +46,10 @@ if (TEST_INSTALLED) { console.log("Testing locally built version @", lbugPath); } -const tmp = require("tmp"); -const fs = require("fs/promises"); -const path = require("path"); +// Temp dir: os.tmpdir() respects TMPDIR (Unix) and TEMP/TMP (Windows). XDG spec +// does not define a temp directory; industry practice is TMPDIR + mkdtemp (unique names). 
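+// Sketch of the pattern used below: fs.mkdtemp(path.join(os.tmpdir(), "lbug-"))
+// yields a unique per-run dir; test.js removes it in its after() hook via
+// fs.rm(tmpPath, { recursive: true }).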
const initTests = async () => { - const tmpPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); - + const tmpPath = await fs.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpPath, "db.kz"); const db = new lbug.Database(dbPath, 1 << 28 /* 256MB */); const conn = new lbug.Connection(db, 4); @@ -48,16 +70,13 @@ const initTests = async () => { .split("\n"); const dataFileExtension = ["csv", "parquet", "npy", "ttl", "nq", "json", "lbug_extension"]; - const dataFileRegex = new RegExp(`"([^"]+\\.(${dataFileExtension.join('|')}))"`, "gi"); + const dataFileRegex = new RegExp(`"([^"]+\\.(${dataFileExtension.join("|")}))"`, "gi"); for (const line of copy) { if (!line || line.trim().length === 0) { - continue; + continue; } - - // handle multiple data files in one line const statement = line.replace(dataFileRegex, `"${tinysnbDir}$1"`); - await conn.query(statement); } @@ -69,6 +88,7 @@ const initTests = async () => { ); global.dbPath = dbPath; + global.tmpPath = tmpPath; global.db = db; global.conn = conn; }; diff --git a/tools/nodejs_api/test/test.js b/tools/nodejs_api/test/test.js index 4efa4b7e6e..ccafe80d83 100644 --- a/tools/nodejs_api/test/test.js +++ b/tools/nodejs_api/test/test.js @@ -1,8 +1,17 @@ +const { describe, it, before, after } = require("node:test"); +global.describe = describe; +global.it = it; +global.before = before; +global.after = after; + require("./common.js"); -const importTest = (name, path) => { +const path = require("path"); +const fs = require("fs/promises"); + +const importTest = (name, p) => { describe(name, () => { - require(path); + require(p); }); }; @@ -10,12 +19,27 @@ describe("lbug", () => { before(() => { return initTests(); }); - importTest("Database", "./test_database.js"); - importTest("Connection", "./test_connection.js"); - importTest("Query result", "./test_query_result.js"); - importTest("Data types", "./test_data_type.js"); - importTest("Query parameters", "./test_parameter.js"); - importTest("Concurrent query execution", "./test_concurrency.js"); - importTest("Version", "./test_version.js"); - importTest("Synchronous API", "./test_sync_api.js"); + after(async () => { + if (global.conn && !global.conn._isClosed) { + await global.conn.close().catch(() => {}); + } + if (global.db && !global.db._isClosed) { + await global.db.close().catch(() => {}); + } + if (global.tmpPath) { + await fs.rm(global.tmpPath, { recursive: true }).catch(() => {}); + } + // Native addon may keep the event loop alive; force exit so process doesn't hang + process.exit(0); + }); + importTest("Database", path.join(__dirname, "test_database.js")); + importTest("Connection", path.join(__dirname, "test_connection.js")); + importTest("Query result", path.join(__dirname, "test_query_result.js")); + importTest("Data types", path.join(__dirname, "test_data_type.js")); + importTest("Query parameters", path.join(__dirname, "test_parameter.js")); + importTest("Concurrent query execution", path.join(__dirname, "test_concurrency.js")); + importTest("Version", path.join(__dirname, "test_version.js")); + importTest("Synchronous API", path.join(__dirname, "test_sync_api.js")); + importTest("registerStream / LOAD FROM stream", path.join(__dirname, "test_register_stream.js")); + importTest("Resilience (close during/after use)", path.join(__dirname, "test_resilience.js")); }); diff --git a/tools/nodejs_api/test/test_concurrency.js 
b/tools/nodejs_api/test/test_concurrency.js index 8b70776036..fc611f10f9 100644 --- a/tools/nodejs_api/test/test_concurrency.js +++ b/tools/nodejs_api/test/test_concurrency.js @@ -1,5 +1,3 @@ -const { assert } = require("chai"); - describe("Concurrent query execution within a single connection", function () { it("should dispatch multiple queries concurrently with query strings", async function () { const queryResults = await Promise.all([ @@ -47,30 +45,36 @@ describe("Concurrent query execution across multiple connections", function () { for (let i = 0; i < 5; i++) { connections.push(new lbug.Connection(db)); } - const queryResults = await Promise.all([ - connections[0].query( - "MATCH (a:person) WHERE a.ID = 0 RETURN a.isStudent;" - ), - connections[1].query( - "MATCH (a:person) WHERE a.ID = 2 RETURN a.isStudent;" - ), - connections[2].query( - "MATCH (a:person) WHERE a.ID = 3 RETURN a.isStudent;" - ), - connections[3].query( - "MATCH (a:person) WHERE a.ID = 5 RETURN a.isStudent;" - ), - connections[4].query( - "MATCH (a:person) WHERE a.ID = 7 RETURN a.isStudent;" - ), - ]); - const results = await Promise.all( - queryResults.map((queryResult) => queryResult.getAll()) - ); - assert.isTrue(results[0][0]["a.isStudent"]); - assert.isTrue(results[1][0]["a.isStudent"]); - assert.isFalse(results[2][0]["a.isStudent"]); - assert.isFalse(results[3][0]["a.isStudent"]); - assert.isFalse(results[4][0]["a.isStudent"]); + try { + const queryResults = await Promise.all([ + connections[0].query( + "MATCH (a:person) WHERE a.ID = 0 RETURN a.isStudent;" + ), + connections[1].query( + "MATCH (a:person) WHERE a.ID = 2 RETURN a.isStudent;" + ), + connections[2].query( + "MATCH (a:person) WHERE a.ID = 3 RETURN a.isStudent;" + ), + connections[3].query( + "MATCH (a:person) WHERE a.ID = 5 RETURN a.isStudent;" + ), + connections[4].query( + "MATCH (a:person) WHERE a.ID = 7 RETURN a.isStudent;" + ), + ]); + const results = await Promise.all( + queryResults.map((queryResult) => queryResult.getAll()) + ); + assert.isTrue(results[0][0]["a.isStudent"]); + assert.isTrue(results[1][0]["a.isStudent"]); + assert.isFalse(results[2][0]["a.isStudent"]); + assert.isFalse(results[3][0]["a.isStudent"]); + assert.isFalse(results[4][0]["a.isStudent"]); + } finally { + for (const c of connections) { + if (!c._isClosed) await c.close().catch(() => {}); + } + } }); }); diff --git a/tools/nodejs_api/test/test_connection.js b/tools/nodejs_api/test/test_connection.js index 34345ad26a..b60ca13cb2 100644 --- a/tools/nodejs_api/test/test_connection.js +++ b/tools/nodejs_api/test/test_connection.js @@ -1,5 +1,3 @@ -const { assert } = require("chai"); - describe("Connection constructor", function () { it("should create a connection with a valid database object", async function () { const connection = new lbug.Connection(db); @@ -9,6 +7,7 @@ describe("Connection constructor", function () { assert.exists(connection._connection); assert.isTrue(connection._isInitialized); assert.notExists(connection._initPromise); + await connection.close(); }); it("should throw error if the database object is invalid", async function () { @@ -122,6 +121,50 @@ describe("Execute", function () { }); }); +describe("ping", function () { + it("should resolve to true when connection is alive", async function () { + const ok = await conn.ping(); + assert.strictEqual(ok, true); + }); +}); + +describe("transaction", function () { + it("should commit and return fn result on success", async function () { + const result = await conn.transaction(async () => { + const q = 
await conn.query("RETURN 42 AS x"); + const rows = await q.getAll(); + q.close(); + return rows[0].x; + }); + assert.equal(result, 42); + }); + + it("should rollback and rethrow on fn error", async function () { + const err = new Error("tx abort"); + try { + await conn.transaction(async () => { + await conn.query("RETURN 1"); + throw err; + }); + assert.fail("transaction should have thrown"); + } catch (e) { + assert.strictEqual(e, err); + } + const q = await conn.query("RETURN 1"); + assert.isTrue(q.hasNext()); + q.close(); + }); + + it("should reject non-function", async function () { + try { + await conn.transaction("not a function"); + assert.fail("transaction should have thrown"); + } catch (e) { + assert.equal(e.message, "transaction() requires a function."); + } + }); +}); + describe("Query", function () { it("should run a valid query", async function () { const queryResult = await conn.query("MATCH (a:person) RETURN COUNT(*)"); @@ -186,8 +229,8 @@ describe("Query", function () { describe("Timeout", function () { it("should abort a query if the timeout is reached", async function () { + const newConn = new lbug.Connection(db); try { - const newConn = new lbug.Connection(db); await newConn.init(); newConn.setQueryTimeout(1); await newConn.query( @@ -196,12 +239,14 @@ describe("Timeout", function () { assert.fail("No error thrown when the query times out."); } catch (err) { assert.equal(err.message, "Interrupted."); + } finally { + if (!newConn._isClosed) await newConn.close().catch(() => {}); } }); it("should allow setting a timeout before the connection is initialized", async function () { + const newConn = new lbug.Connection(db); try { - const newConn = new lbug.Connection(db); newConn.setQueryTimeout(1); await newConn.init(); await newConn.query( @@ -210,10 +255,83 @@ describe("Timeout", function () { assert.fail("No error thrown when the query times out."); } catch (err) { assert.equal(err.message, "Interrupted."); + } finally { + if (!newConn._isClosed) await newConn.close().catch(() => {}); } }); }); +describe("Interrupt", function () { + it("should abort a long-running query when interrupt() is called", { timeout: 5000 }, async function () { + if (process.platform === "win32") { + this.skip(); + } + const newConn = new lbug.Connection(db); + try { + await newConn.init(); + const longQuery = + "UNWIND RANGE(1, 30000) AS x UNWIND RANGE(1, 30000) AS y RETURN COUNT(x + y);"; + const queryPromise = newConn.query(longQuery); + setTimeout(() => newConn.interrupt(), 100); + try { + await queryPromise; + assert.fail("No error thrown when the query was interrupted."); + } catch (err) { + assert.equal(err.message, "Interrupted."); + } + } finally { + if (!newConn._isClosed) await newConn.close().catch(() => {}); + } + }); +}); + +describe("AbortSignal", function () { + it("should reject with AbortError when signal is already aborted before query starts", async function () { + const ac = new AbortController(); + ac.abort(); + try { + await conn.query("RETURN 1", { signal: ac.signal }); + assert.fail("No error thrown when signal was already aborted."); + } catch (err) { + assert.equal(err.name, "AbortError"); + assert.equal(err.message, "The operation was aborted."); + } + }); + + it("should reject with AbortError when signal is aborted during query", async function () { + const newConn = new lbug.Connection(db); + try { + await newConn.init(); + const ac = new AbortController(); + const longQuery = + "UNWIND RANGE(1, 30000) AS x UNWIND RANGE(1, 30000) AS y RETURN COUNT(x + y);"; + const 
queryPromise = newConn.query(longQuery, { signal: ac.signal }); + setTimeout(() => ac.abort(), 100); + try { + await queryPromise; + assert.fail("No error thrown when signal was aborted during query."); + } catch (err) { + assert.equal(err.name, "AbortError"); + } + } finally { + if (!newConn._isClosed) await newConn.close().catch(() => {}); + } + }); + + it("should work with progressCallback in options object", async function () { + let progressCalled = false; + const result = await conn.query("RETURN 1", { + progressCallback: () => { + progressCalled = true; + }, + }); + assert.exists(result); + const rows = Array.isArray(result) ? result : [result]; + assert.isAtLeast(rows.length, 1); + rows.forEach((r) => r.close()); + }); +}); + describe("Close", function () { it("should close the connection", async function () { const newConn = new lbug.Connection(db); diff --git a/tools/nodejs_api/test/test_data_type.js b/tools/nodejs_api/test/test_data_type.js index 68aa682516..b42ba44153 100644 --- a/tools/nodejs_api/test/test_data_type.js +++ b/tools/nodejs_api/test/test_data_type.js @@ -1,4 +1,3 @@ -const { assert } = require("chai"); const EPSILON = 1e-6; describe("BOOL", function () { @@ -380,8 +379,8 @@ describe("LIST", function () { assert.equal(result[0]["a.courseScoresPerTerm"].length, 2); assert.equal(result[0]["a.courseScoresPerTerm"][0].length, 2); assert.equal(result[0]["a.courseScoresPerTerm"][1].length, 3); - assert.deepEqual(result[0]["a.courseScoresPerTerm"][0][(10, 8)]); - assert.deepEqual(result[0]["a.courseScoresPerTerm"][1][(6, 7, 8)]); + assert.deepEqual(result[0]["a.courseScoresPerTerm"][0], [10, 8]); + assert.deepEqual(result[0]["a.courseScoresPerTerm"][1], [6, 7, 8]); }); }); @@ -451,7 +450,7 @@ describe("NODE", function () { ); assert.deepEqual(result["courseScoresPerTerm"][0], [10, 8]); assert.deepEqual(result["courseScoresPerTerm"][1], [6, 7, 8]); - assert.equal(result["usedNames"], "Aida"); + assert.deepEqual(result["usedNames"], ["Aida"]); assert.equal(result["_id"]["offset"], 0); assert.equal(result["_id"]["table"], 0); }); diff --git a/tools/nodejs_api/test/test_database.js b/tools/nodejs_api/test/test_database.js index 06ba4c24f4..0235347040 100644 --- a/tools/nodejs_api/test/test_database.js +++ b/tools/nodejs_api/test/test_database.js @@ -1,23 +1,25 @@ -const { assert } = require("chai"); -const tmp = require("tmp"); const process = require("process"); const path = require("path"); -const fs = require('fs'); +const fs = require("fs"); +const fsp = require("fs/promises"); +const os = require("os"); -const spwan = require("child_process").spawn; +const { spawn } = require("child_process"); const openDatabaseOnSubprocess = (dbPath) => { return new Promise((resolve, _) => { const node = process.argv[0]; + // Use env vars so Windows paths with backslashes don't break the -e code string + const env = { ...process.env, LBUG_PATH: lbugPath, DB_PATH: dbPath }; const code = ` (async() => { - const lbug = require("${lbugPath}"); - const db = new lbug.Database("${dbPath}", 1 << 28); + const lbug = require(process.env.LBUG_PATH); + const db = new lbug.Database(process.env.DB_PATH, 1 << 28); await db.init(); console.log("Database initialized."); })(); `; - const child = spwan(node, ["-e", code]); + const child = spawn(node, ["-e", code], { env }); let stdout = ""; let stderr = ""; child.stdout.on("data", (data) => { @@ -34,14 +36,7 @@ const openDatabaseOnSubprocess = (dbPath) => { describe("Database constructor", function () { it("should create a database with a valid path and 
buffer size", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); assert.exists(testDb); @@ -53,14 +48,7 @@ describe("Database constructor", function () { }); it("should create a database with a valid path and no buffer size", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath); assert.exists(testDb); @@ -70,22 +58,18 @@ describe("Database constructor", function () { assert.isTrue(testDb._isInitialized); assert.notExists(testDb._initPromise); - // check default config - let res = await conn.query("CALL current_setting('checkpoint_threshold') RETURN *"); + const testConn = new lbug.Connection(testDb); + const res = await testConn.query("CALL current_setting('checkpoint_threshold') RETURN *"); assert.equal(res.getNumTuples(), 1); const tuple = await res.getNext(); - assert.isTrue(tuple["checkpoint_threshold"] > 0); + assert.isTrue(Number(tuple["checkpoint_threshold"]) > 0); + res.close(); + testConn.close(); + testDb.close(); }); it("should create a database with auto checkpoint configured", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */, @@ -105,14 +89,7 @@ describe("Database constructor", function () { }); it("should create a database with checkpoint threshold configured", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */, @@ -126,21 +103,14 @@ describe("Database constructor", function () { let res = await conn.query("CALL current_setting('checkpoint_threshold') RETURN *"); assert.equal(res.getNumTuples(), 1); const tuple = await res.getNext(); - assert.equal(tuple["checkpoint_threshold"], 1234); + assert.equal(Number(tuple["checkpoint_threshold"]), 1234); res.close(); conn.close(); testDb.close(); }); it("should create a database with throwOnWalReplayFailure configured", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const walPath = dbPath + ".wal"; fs.writeFileSync(walPath, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); @@ -164,14 +134,7 @@ describe("Database constructor", function () { }); it("should create 
a database with enableChecksums configured", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); let testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */, @@ -211,14 +174,7 @@ describe("Database constructor", function () { }); it("should create a database in read-only mode", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); assert.exists(testDb); @@ -263,14 +219,7 @@ describe("Database constructor", function () { }); it("should create a database with a valid max DB size", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database( dbPath, @@ -374,22 +323,10 @@ describe("Database constructor", function () { describe("Database close", function () { it("should allow initializing a new database after closing", async function () { - if (process.platform === "win32") { - this._runnable.title += " (skipped: not implemented on Windows)"; - this.skip(); - } - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); - // FIXME: doesn't work properly on windows let subProcessResult = await openDatabaseOnSubprocess(dbPath); assert.notEqual(subProcessResult.code, 0); assert.include( @@ -404,14 +341,7 @@ describe("Database close", function () { }); it("should throw error if the database is closed", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); @@ -425,14 +355,7 @@ describe("Database close", function () { }); it("should close the database if it is initialized", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); @@ -444,14 +367,7 @@ describe("Database close", function () { }); it("should close the database if it is not initialized", async function () { - 
const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); assert.isFalse(testDb._isInitialized); @@ -462,14 +378,7 @@ describe("Database close", function () { }); it("should close a initializing database", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await Promise.all([testDb.init(), testDb.close()]); @@ -479,14 +388,7 @@ describe("Database close", function () { }); it("should gracefully close a database multiple times", async function () { - const tmpDbPath = await new Promise((resolve, reject) => { - tmp.dir({ unsafeCleanup: true }, (err, path, _) => { - if (err) { - return reject(err); - } - return resolve(path); - }); - }); + const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); @@ -507,9 +409,17 @@ describe("Database close", function () { assert.deepEqual(tuple, { "+(1,1)": 2 }); testDb.closeSync(); assert.isTrue(testDb._isClosed); - assert.throws(() => conn.querySync("RETURN 1+1"), Error, "Runtime exception: The current operation is not allowed because the parent database is closed."); + assert.throws( + () => conn.querySync("RETURN 1+1"), + Error, + /(Runtime exception:.*parent database is closed|Connection is closed\.)/ + ); conn.closeSync(); assert.isTrue(conn._isClosed); - assert.throws(() => res.resetIterator(), Error, "Runtime exception: The current operation is not allowed because the parent database is closed."); + assert.throws( + () => res.resetIterator(), + Error, + /(Runtime exception:.*parent database is closed|Connection is closed\.)/ + ); }); }); diff --git a/tools/nodejs_api/test/test_parameter.js b/tools/nodejs_api/test/test_parameter.js index 1b2815aa33..8398cdbcaf 100644 --- a/tools/nodejs_api/test/test_parameter.js +++ b/tools/nodejs_api/test/test_parameter.js @@ -1,5 +1,3 @@ -const { assert } = require("chai"); - describe("BOOL", function () { it("should transform booleans as BOOL parameter", async function () { const preparedStatement = await conn.prepare( @@ -73,7 +71,7 @@ describe("UINT64", function () { 1: 10000000000000000000, }); const result = await queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT64)"], "10000000000000000000"); + assert.equal(result[0]["CAST($1, UINT64)"], 10000000000000000000); }); }); @@ -84,7 +82,7 @@ describe("UINT32", function () { 1: 4294967295, }); const result = await queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT32)"], "4294967295"); + assert.equal(result[0]["CAST($1, UINT32)"], 4294967295); }); }); @@ -95,7 +93,7 @@ describe("UINT16", function () { 1: 65535, }); const result = await queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT16)"], "65535"); + assert.equal(result[0]["CAST($1, UINT16)"], 65535); }); }); @@ -106,7 +104,7 @@ describe("UINT8", function () { 1: 255, }); const result = await 
queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT8)"], "255"); + assert.equal(result[0]["CAST($1, UINT8)"], 255); }); }); diff --git a/tools/nodejs_api/test/test_pool.js b/tools/nodejs_api/test/test_pool.js new file mode 100644 index 0000000000..35f9f54e1d --- /dev/null +++ b/tools/nodejs_api/test/test_pool.js @@ -0,0 +1,161 @@ +require("./common.js"); +const path = require("path"); +const fsp = require("fs/promises"); +const os = require("os"); + +describe("Connection pool", function () { + let pool; + let tmpDir; + + before(async function () { + await initTests(); + tmpDir = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-pool-")); + }); + + after(async function () { + if (tmpDir) await fsp.rm(tmpDir, { recursive: true }).catch(() => {}); + }); + + afterEach(async function () { + if (pool && !pool._closed) { + await pool.close(); + } + }); + + it("createPool requires maxSize", function () { + assert.throws(() => lbug.createPool({}), /maxSize/); + assert.throws(() => lbug.createPool({ databasePath: ":memory:" }), /maxSize/); + assert.doesNotThrow(() => lbug.createPool({ maxSize: 5 })); + }); + + it("pool.run(fn) runs with a connection and releases on success", async function () { + pool = lbug.createPool({ + databasePath: path.join(tmpDir, "p1.kz"), + maxSize: 2, + databaseOptions: { bufferManagerSize: 1 << 24 }, + }); + const result = await pool.run(async (conn) => { + const r = await conn.query("RETURN 1 AS x"); + const rows = await r.getAll(); + r.close(); + return rows; + }); + assert.lengthOf(result, 1); + assert.strictEqual(result[0].x, 1); + }); + + it("pool.run(fn) releases on throw", async function () { + pool = lbug.createPool({ + databasePath: path.join(tmpDir, "p2.kz"), + maxSize: 2, + databaseOptions: { bufferManagerSize: 1 << 24 }, + }); + let err; + try { + await pool.run(async () => { + throw new Error("fail"); + }); + } catch (e) { + err = e; + } + assert.instanceOf(err, Error); + assert.include(err.message, "fail"); + const again = await pool.run(async (conn) => { + const r = await conn.query("RETURN 2 AS y"); + const rows = await r.getAll(); + r.close(); + return rows; + }); + assert.lengthOf(again, 1); + assert.strictEqual(again[0].y, 2); + }); + + it("acquire/release and multiple concurrent cycles", async function () { + pool = lbug.createPool({ + databasePath: path.join(tmpDir, "p3.kz"), + maxSize: 3, + databaseOptions: { bufferManagerSize: 1 << 24 }, + }); + const conn1 = await pool.acquire(); + const conn2 = await pool.acquire(); + const conn3 = await pool.acquire(); + const r1 = await conn1.query("RETURN 1 AS a"); + const r2 = await conn2.query("RETURN 2 AS b"); + const r3 = await conn3.query("RETURN 3 AS c"); + assert.strictEqual((await r1.getAll())[0].a, 1); + assert.strictEqual((await r2.getAll())[0].b, 2); + assert.strictEqual((await r3.getAll())[0].c, 3); + r1.close(); + r2.close(); + r3.close(); + pool.release(conn1); + pool.release(conn2); + pool.release(conn3); + const conn4 = await pool.acquire(); + const r4 = await conn4.query("RETURN 4 AS d"); + assert.strictEqual((await r4.getAll())[0].d, 4); + r4.close(); + pool.release(conn4); + }); + + it("pool does not exceed maxSize", async function () { + pool = lbug.createPool({ + databasePath: path.join(tmpDir, "p4.kz"), + maxSize: 2, + databaseOptions: { bufferManagerSize: 1 << 24 }, + }); + const c1 = await pool.acquire(); + const c2 = await pool.acquire(); + let resolved = false; + const p3 = pool.acquire().then((c) => { + resolved = true; + pool.release(c); + }); + await new Promise((r) => 
setImmediate(r)); + assert.isFalse(resolved); + pool.release(c1); + await p3; + assert.isTrue(resolved); + pool.release(c2); + }); + + it("acquire() rejects after acquireTimeoutMillis when no connection available", async function () { + pool = lbug.createPool({ + databasePath: path.join(tmpDir, "p5a.kz"), + maxSize: 1, + acquireTimeoutMillis: 80, + databaseOptions: { bufferManagerSize: 1 << 24 }, + }); + const c1 = await pool.acquire(); + let timeoutErr; + try { + await pool.acquire(); + } catch (e) { + timeoutErr = e; + } + assert.instanceOf(timeoutErr, Error); + assert.include(timeoutErr.message, "timed out"); + pool.release(c1); + }); + + it("pool.close() prevents new acquire and closes all", async function () { + pool = lbug.createPool({ + databasePath: path.join(tmpDir, "p5.kz"), + maxSize: 2, + databaseOptions: { bufferManagerSize: 1 << 24 }, + }); + await pool.run(async (conn) => { + const r = await conn.query("RETURN 1"); + r.close(); + }); + await pool.close(); + let closedErr; + try { + await pool.acquire(); + } catch (e) { + closedErr = e; + } + assert.instanceOf(closedErr, Error); + assert.include(closedErr.message, "closed"); + }); +}); diff --git a/tools/nodejs_api/test/test_query_result.js b/tools/nodejs_api/test/test_query_result.js index 5f84b3f32d..5d384c3939 100644 --- a/tools/nodejs_api/test/test_query_result.js +++ b/tools/nodejs_api/test/test_query_result.js @@ -1,5 +1,3 @@ -const { assert } = require("chai"); - const PERSON_IDS = [0, 2, 3, 5, 7, 8, 9, 10]; describe("Reset iterator", function () { @@ -66,22 +64,30 @@ describe("Get next", function () { } }); - it("should throw an error if there is no next tuple", async function () { + it("should return null when no more tuples", async function () { const queryResult = await conn.query( "MATCH (a:person) RETURN a.ID ORDER BY a.ID" ); for (let i = 0; i < 8; ++i) { await queryResult.getNext(); } - try { - await queryResult.getNext(); - assert.fail("No error thrown when there is no next tuple"); - } catch (err) { - assert.equal( - err.message, - "Runtime exception: No more tuples in QueryResult, Please check hasNext() before calling getNext()." 
- ); + const exhausted = await queryResult.getNext(); + assert.isNull(exhausted, "getNext() returns null when no more tuples"); + }); + + it("getNext() returns null exactly when hasNext() is false", async function () { + const queryResult = await conn.query( + "MATCH (a:person) RETURN a.ID ORDER BY a.ID" + ); + let count = 0; + while (queryResult.hasNext()) { + const row = await queryResult.getNext(); + assert.isNotNull(row, "getNext() must return value when hasNext() is true"); + count++; } + assert.equal(count, 8); + const afterExhausted = await queryResult.getNext(); + assert.isNull(afterExhausted, "getNext() must return null when hasNext() was false"); }); }); @@ -181,7 +187,7 @@ describe("Get column data types", function () { p.courseScoresPerTerm` ); const columnDataTypes = await queryResult.getColumnDataTypes(); - const ansexpectedResultArr = [ + const expectedResultArr = [ "INT64", "STRING", "BOOL", @@ -192,7 +198,7 @@ describe("Get column data types", function () { "INT64[]", "INT64[][]", ]; - assert.deepEqual(columnDataTypes, ansexpectedResultArr); + assert.deepEqual(columnDataTypes, expectedResultArr); }); }); @@ -211,6 +217,46 @@ describe("Get query summary", function () { }); }); +describe("Async iterator (for await...of)", function () { + it("should iterate rows same as getNext", async function () { + const queryResult = await conn.query( + "MATCH (a:person) RETURN a.ID ORDER BY a.ID" + ); + const ids = []; + for await (const row of queryResult) { + ids.push(row["a.ID"]); + } + assert.deepEqual(ids, PERSON_IDS); + }); + + it("should not materialize full result in memory", async function () { + const queryResult = await conn.query( + "MATCH (a:person) RETURN a.ID ORDER BY a.ID" + ); + let count = 0; + for await (const row of queryResult) { + count++; + assert.equal(row["a.ID"], PERSON_IDS[count - 1]); + } + assert.equal(count, PERSON_IDS.length); + }); +}); + +describe("toStream", function () { + it("should yield rows as Readable stream", async function () { + const queryResult = await conn.query( + "MATCH (a:person) RETURN a.ID ORDER BY a.ID" + ); + const stream = queryResult.toStream(); + const rows = []; + for await (const row of stream) { + rows.push(row); + } + const ids = rows.map((r) => r["a.ID"]); + assert.deepEqual(ids, PERSON_IDS); + }); +}); + describe("Close", function () { it("should close the query result", async function () { const queryResult = await conn.query( diff --git a/tools/nodejs_api/test/test_register_stream.js b/tools/nodejs_api/test/test_register_stream.js new file mode 100644 index 0000000000..6dea88b437 --- /dev/null +++ b/tools/nodejs_api/test/test_register_stream.js @@ -0,0 +1,79 @@ +describe("registerStream / LOAD FROM stream", function () { + it("should LOAD FROM registered stream and return rows", async function () { + async function* rowSource() { + yield [1, "a"]; + yield [2, "b"]; + yield [3, "c"]; + } + await conn.registerStream("mystream", rowSource(), { + columns: [ + { name: "id", type: "INT64" }, + { name: "label", type: "STRING" }, + ], + }); + try { + const result = await conn.query("LOAD FROM mystream RETURN *"); + const rows = Array.isArray(result) ? 
result : [result]; + assert.isAtLeast(rows.length, 1); + const r = rows[0]; + assert.isTrue(r.hasNext()); + const row1 = await r.getNext(); + assert.exists(row1); + assert.equal(row1["id"], 1); + assert.equal(row1["label"], "a"); + const row2 = await r.getNext(); + assert.equal(row2["id"], 2); + assert.equal(row2["label"], "b"); + const row3 = await r.getNext(); + assert.equal(row3["id"], 3); + assert.equal(row3["label"], "c"); + assert.isNull(await r.getNext()); + } finally { + conn.unregisterStream("mystream"); + } + }); + + it("should LOAD FROM stream with object rows (column order from schema)", async function () { + async function* objectRowSource() { + yield { id: 10, label: "x" }; + yield { label: "y", id: 20 }; + } + await conn.registerStream("objstream", objectRowSource(), { + columns: [ + { name: "id", type: "INT64" }, + { name: "label", type: "STRING" }, + ], + }); + try { + const result = await conn.query("LOAD FROM objstream RETURN *"); + const r = Array.isArray(result) ? result[0] : result; + const row1 = await r.getNext(); + assert.isNotNull(row1, "expected first row from stream"); + assert.equal(row1["id"], 10); + assert.equal(row1["label"], "x"); + const row2 = await r.getNext(); + assert.isNotNull(row2, "expected second row from stream"); + assert.equal(row2["id"], 20); + assert.equal(row2["label"], "y"); + assert.isNull(await r.getNext()); + } finally { + conn.unregisterStream("objstream"); + } + }); + + it("should unregisterStream by name", async function () { + async function* empty() { + if (false) yield []; + } + await conn.registerStream("tmpstream", empty(), { + columns: [{ name: "x", type: "INT64" }], + }); + conn.unregisterStream("tmpstream"); + try { + await conn.query("LOAD FROM tmpstream RETURN *"); + assert.fail("Expected error when loading from unregistered stream."); + } catch (e) { + assert.include(e.message, "not in scope"); + } + }); +}); diff --git a/tools/nodejs_api/test/test_resilience.js b/tools/nodejs_api/test/test_resilience.js new file mode 100644 index 0000000000..d53f3afa29 --- /dev/null +++ b/tools/nodejs_api/test/test_resilience.js @@ -0,0 +1,184 @@ +"use strict"; + +const path = require("path"); +const fsp = require("fs/promises"); +const os = require("os"); + +/** + * Resilience tests: close connection/database during or after operations. + * Goal: no crashes (SIGSEGV, native abort); all failures must surface as JS errors. 
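+ *
+ * Every case below runs against a fresh temp database through the withTempDb(fn)
+ * helper, e.g. it("name", withTempDb(async (testDb, testConn) => { ... })).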
+ */ +function withTempDb(fn) { + return async function () { + const tmpPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const dbPath = path.join(tmpPath, "db.kz"); + const testDb = new lbug.Database(dbPath, 1 << 26 /* 64MB */); + await testDb.init(); + const testConn = new lbug.Connection(testDb); + await testConn.init(); + try { + await fn.call(this, testDb, testConn); + } finally { + if (!testDb._isClosed) await testDb.close().catch(() => {}); + if (!testConn._isClosed) await testConn.close().catch(() => {}); + await fsp.rm(tmpPath, { recursive: true }).catch(() => {}); + } + }; +} + +describe("Resilience (close during/after use)", { timeout: 10000 }, function () { + it("query rejects when connection is closed while query is in flight", withTempDb(async (testDb, testConn) => { + const longQuery = "UNWIND range(1, 20000) AS x UNWIND range(1, 2000) AS y RETURN count(*)"; + const queryPromise = testConn.query(longQuery); + await new Promise((r) => setTimeout(r, 80)); + testConn.closeSync(); + const timeoutMs = 2000; + const timeoutPromise = new Promise((_, reject) => { + setTimeout(() => reject(new Error(`Expected query to reject within ${timeoutMs}ms when connection was closed (timed out).`)), timeoutMs); + }); + try { + await Promise.race([queryPromise, timeoutPromise]); + assert.fail("Expected query to reject when connection was closed during execution."); + } catch (err) { + if ((err.message || "").includes("timed out")) throw err; + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + const ok = msg.includes("closed") || msg.includes("not allowed") || msg.includes("runtime"); + assert.isTrue(ok, `Expected error about closed/not allowed, got: ${err.message}`); + } + })); + + // Database close is synchronous and blocks until in-flight work completes (core behavior). + // So we cannot observe "query rejects when database is closed" without a non-blocking close. 
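+  // The case is kept as it.skip so the intended contract stays documented; if a
+  // non-blocking close ever lands in core, removing .skip should make it runnable.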
+ it.skip("query rejects when database is closed while query is in flight", withTempDb(async (testDb, testConn) => { + const longQuery = "UNWIND range(1, 20000) AS x UNWIND range(1, 2000) AS y RETURN count(*)"; + const queryPromise = testConn.query(longQuery); + await new Promise((r) => setTimeout(r, 120)); + testDb.closeSync(); + const timeoutMs = 5000; + const timeoutPromise = new Promise((_, reject) => { + setTimeout(() => reject(new Error(`Expected query to reject within ${timeoutMs}ms when database was closed (timed out).`)), timeoutMs); + }); + try { + await Promise.race([queryPromise, timeoutPromise]); + assert.fail("Expected query to reject when database was closed during execution."); + } catch (err) { + if ((err.message || "").includes("timed out")) throw err; + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + const ok = msg.includes("closed") || msg.includes("not allowed") || msg.includes("runtime"); + assert.isTrue(ok, `Expected error about closed/not allowed, got: ${err.message}`); + } + })); + + it("getNext() after connection closed throws and does not crash", withTempDb(async (testDb, testConn) => { + const res = await testConn.query("RETURN 1 AS x"); + const row = await res.getNext(); + assert.equal(row.x, 1); + testConn.closeSync(); + try { + await res.getNext(); + assert.fail("Expected getNext() to throw after connection closed."); + } catch (err) { + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); + } + })); + + it("hasNext() after connection closed throws and does not crash", withTempDb(async (testDb, testConn) => { + const res = await testConn.query("RETURN 1 AS x"); + assert.isTrue(res.hasNext()); + testConn.closeSync(); + try { + res.hasNext(); + assert.fail("Expected hasNext() to throw after connection closed."); + } catch (err) { + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); + } + })); + + it("getNext() after database closed throws and does not crash", withTempDb(async (testDb, testConn) => { + const res = await testConn.query("RETURN 1 AS x"); + await res.getNext(); + testDb.closeSync(); + try { + await res.getNext(); + assert.fail("Expected getNext() to throw after database closed."); + } catch (err) { + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); + } + })); + + it("hasNext() after database closed throws and does not crash", withTempDb(async (testDb, testConn) => { + const res = await testConn.query("RETURN 1 AS x"); + testDb.closeSync(); + try { + res.hasNext(); + assert.fail("Expected hasNext() to throw after database closed."); + } catch (err) { + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); + } + })); + + it("registerStream then close connection then query throws before running", withTempDb(async (testDb, testConn) => { + async function* gen() { + yield [1]; + } + await testConn.registerStream("s", gen(), { columns: [{ name: "x", type: "INT64" }] }); + testConn.closeSync(); + try { + await 
testConn.query("LOAD FROM s RETURN *"); + assert.fail("Expected query to throw when connection is already closed."); + } catch (err) { + assert.instanceOf(err, Error); + assert.include((err.message || "").toLowerCase(), "closed"); + } + })); + + it("close connection while iterating result: second getNext throws", withTempDb(async (testDb, testConn) => { + const res = await testConn.query("UNWIND [1,2,3] AS x RETURN x"); + const a = await res.getNext(); + assert.equal(a.x, 1); + testConn.closeSync(); + try { + await res.getNext(); + assert.fail("Expected getNext() to throw after connection closed mid-iteration."); + } catch (err) { + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); + } + })); + + it("query after connection closed throws immediately (no native call)", async function () { + const testConn = new lbug.Connection(db); + await testConn.init(); + await testConn.close(); + try { + await testConn.query("RETURN 1"); + assert.fail("Expected query to throw when connection is closed."); + } catch (err) { + assert.equal(err.message, "Connection is closed."); + } + }); + + it("getNextSync after database closed throws", withTempDb(async (testDb, testConn) => { + const res = await testConn.query("RETURN 1 AS x"); + testDb.closeSync(); + try { + res.getNextSync(); + assert.fail("Expected getNextSync() to throw after database closed."); + } catch (err) { + assert.instanceOf(err, Error); + const msg = (err.message || "").toLowerCase(); + assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); + } + })); +}); diff --git a/tools/nodejs_api/test/test_sync_api.js b/tools/nodejs_api/test/test_sync_api.js index 25667227e3..be1e62f7b9 100644 --- a/tools/nodejs_api/test/test_sync_api.js +++ b/tools/nodejs_api/test/test_sync_api.js @@ -1,5 +1,3 @@ -const { assert } = require("chai"); - const PERSON_IDS = [0, 2, 3, 5, 7, 8, 9, 10]; describe("Query execution", function () { diff --git a/tools/nodejs_api/test/test_version.js b/tools/nodejs_api/test/test_version.js index f59df235d0..7057c15084 100644 --- a/tools/nodejs_api/test/test_version.js +++ b/tools/nodejs_api/test/test_version.js @@ -1,5 +1,3 @@ -const { assert } = require("chai"); - describe("Get version", function () { it("should get the version of the library", function () { assert.isString(lbug.VERSION); From 4e7006fae2bf8283794223e6ab9c56e809a8d415 Mon Sep 17 00:00:00 2001 From: VK <112831093+vkozio@users.noreply.github.com> Date: Wed, 18 Feb 2026 01:50:46 +0300 Subject: [PATCH 2/2] =?UTF-8?q?feat(nodejs):=20core=20API=20=E2=80=94=20as?= =?UTF-8?q?ync=20iterator,=20transaction(fn),=20ping,=20toStream,=20Databa?= =?UTF-8?q?se=20options,=20ThreadSafeFunction=20fix?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 3 + docs/testing.md | 6 - tools/nodejs_api/CHANGELOG.md | 9 - tools/nodejs_api/CMakeLists.txt | 29 +- tools/nodejs_api/README.md | 237 +------------ tools/nodejs_api/build.js | 19 -- tools/nodejs_api/copy_src_to_build.js | 22 -- tools/nodejs_api/docs/API.md | 319 ------------------ tools/nodejs_api/docs/database_locked.md | 40 --- .../docs/execution_chain_analysis.md | 65 ---- tools/nodejs_api/docs/nodejs_testing.md | 66 ---- tools/nodejs_api/examples/README.md | 11 - tools/nodejs_api/examples/quickstart.mjs | 32 -- tools/nodejs_api/examples/stream-load.mjs | 29 
-- tools/nodejs_api/index.js | 4 - tools/nodejs_api/index.mjs | 12 - tools/nodejs_api/install.js | 80 ++--- tools/nodejs_api/package.json | 27 +- .../src_cpp/include/node_connection.h | 8 - .../src_cpp/include/node_query_result.h | 4 +- .../src_cpp/include/node_scan_replacement.h | 60 ---- .../src_cpp/include/node_stream_scan.h | 29 -- tools/nodejs_api/src_cpp/include/node_util.h | 2 +- tools/nodejs_api/src_cpp/node_connection.cpp | 156 +-------- .../nodejs_api/src_cpp/node_query_result.cpp | 12 - .../src_cpp/node_scan_replacement.cpp | 134 -------- tools/nodejs_api/src_cpp/node_stream_scan.cpp | 103 ------ tools/nodejs_api/src_js/connection.js | 235 +------------ tools/nodejs_api/src_js/database.js | 89 +---- tools/nodejs_api/src_js/index.js | 4 - tools/nodejs_api/src_js/index.mjs | 3 - tools/nodejs_api/src_js/lbug.d.ts | 164 +-------- tools/nodejs_api/src_js/pool.js | 222 ------------ tools/nodejs_api/src_js/query_result.js | 16 +- tools/nodejs_api/test/common.js | 64 ++-- tools/nodejs_api/test/test.js | 44 +-- tools/nodejs_api/test/test_concurrency.js | 58 ++-- tools/nodejs_api/test/test_connection.js | 82 +---- tools/nodejs_api/test/test_data_type.js | 7 +- tools/nodejs_api/test/test_database.js | 170 +++++++--- tools/nodejs_api/test/test_parameter.js | 10 +- tools/nodejs_api/test/test_pool.js | 161 --------- tools/nodejs_api/test/test_query_result.js | 32 +- tools/nodejs_api/test/test_register_stream.js | 79 ----- tools/nodejs_api/test/test_resilience.js | 184 ---------- tools/nodejs_api/test/test_sync_api.js | 2 + tools/nodejs_api/test/test_version.js | 2 + 47 files changed, 301 insertions(+), 2845 deletions(-) delete mode 100644 tools/nodejs_api/CHANGELOG.md delete mode 100644 tools/nodejs_api/copy_src_to_build.js delete mode 100644 tools/nodejs_api/docs/API.md delete mode 100644 tools/nodejs_api/docs/database_locked.md delete mode 100644 tools/nodejs_api/docs/execution_chain_analysis.md delete mode 100644 tools/nodejs_api/docs/nodejs_testing.md delete mode 100644 tools/nodejs_api/examples/README.md delete mode 100644 tools/nodejs_api/examples/quickstart.mjs delete mode 100644 tools/nodejs_api/examples/stream-load.mjs delete mode 100644 tools/nodejs_api/index.js delete mode 100644 tools/nodejs_api/index.mjs delete mode 100644 tools/nodejs_api/src_cpp/include/node_scan_replacement.h delete mode 100644 tools/nodejs_api/src_cpp/include/node_stream_scan.h delete mode 100644 tools/nodejs_api/src_cpp/node_scan_replacement.cpp delete mode 100644 tools/nodejs_api/src_cpp/node_stream_scan.cpp delete mode 100644 tools/nodejs_api/src_js/pool.js delete mode 100644 tools/nodejs_api/test/test_pool.js delete mode 100644 tools/nodejs_api/test/test_register_stream.js delete mode 100644 tools/nodejs_api/test/test_resilience.js diff --git a/.gitignore b/.gitignore index 5a84aa4078..b1f433c777 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +tree-sitter/ +tree-sitter-cypher/ + .idea/ .vscode .vs diff --git a/docs/testing.md b/docs/testing.md index 3a8a889431..da5927a444 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -35,12 +35,6 @@ TEST_F(MyTest, TestCaseName) { - `test/planner/` - Query planner tests - `test/optimizer/` - Query optimizer tests -## Node.js API - -Tests live in `tools/nodejs_api/test/` and use the Node.js built-in test runner (`node --test`). Run with `npm test` from `tools/nodejs_api/`. - -For guidelines on writing and reviewing these tests, see [Node.js API — Testing Guide](../tools/nodejs_api/docs/nodejs_testing.md). 
- ## Running Tests See `AGENTS.md` for build and test commands. diff --git a/tools/nodejs_api/CHANGELOG.md b/tools/nodejs_api/CHANGELOG.md deleted file mode 100644 index 853b2df74e..0000000000 --- a/tools/nodejs_api/CHANGELOG.md +++ /dev/null @@ -1,9 +0,0 @@ -## Changelog - -### Unreleased - -- **Breaking:** Drop support for Node.js versions lower than 20; the package now requires **Node.js 20 or later** (`engines.node: ">=20.0.0"`). -- **Breaking:** Upgrade native build tooling to **`cmake-js` ^8.0.0** and **`node-addon-api` ^8.0.0**, aligning with the Node.js 20+ support window. -- Clarify Node.js version requirement in the README. -- Add **Node.js API testing guide** at `tools/nodejs_api/docs/nodejs_testing.md` for test authors and reviewers (assertions, isolation, data types, concurrency, errors, resource lifecycle, validation checklist). Remove `tools/nodejs_api/test/test_correctness_audit.md` in favor of this guide. - diff --git a/tools/nodejs_api/CMakeLists.txt b/tools/nodejs_api/CMakeLists.txt index 743259bfca..c1bf5959a5 100644 --- a/tools/nodejs_api/CMakeLists.txt +++ b/tools/nodejs_api/CMakeLists.txt @@ -9,42 +9,25 @@ else() set(NPX_CMD npx) endif() -# Use --log-level error so INFO lines are not captured in OUTPUT_VARIABLE execute_process( - COMMAND ${NPX_CMD} cmake-js print-cmakejs-include --log-level error + COMMAND ${NPX_CMD} cmake-js print-cmakejs-include WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE CMAKE_JS_INC ) execute_process( - COMMAND ${NPX_CMD} cmake-js print-cmakejs-lib --log-level error + COMMAND ${NPX_CMD} cmake-js print-cmakejs-lib WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE CMAKE_JS_LIB ) execute_process( - COMMAND ${NPX_CMD} cmake-js print-cmakejs-src --log-level error + COMMAND ${NPX_CMD} cmake-js print-cmakejs-src WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE CMAKE_JS_SRC ) -string(STRIP "${CMAKE_JS_INC}" CMAKE_JS_INC) -string(STRIP "${CMAKE_JS_LIB}" CMAKE_JS_LIB) -string(STRIP "${CMAKE_JS_SRC}" CMAKE_JS_SRC) -# Filter out cmake-js INFO lines that may be mixed into stdout -foreach(VAR CMAKE_JS_INC CMAKE_JS_LIB CMAKE_JS_SRC) - string(REPLACE "\n" ";" _LINES "${${VAR}}") - set(_FILTERED "") - foreach(_LINE ${_LINES}) - string(STRIP "${_LINE}" _LINE) - if(_LINE AND NOT _LINE MATCHES "^INFO") - list(APPEND _FILTERED "${_LINE}") - endif() - endforeach() - list(JOIN _FILTERED " " _JOINED) - set(${VAR} "${_JOINED}") -endforeach() -string(STRIP "${CMAKE_JS_INC}" CMAKE_JS_INC) -string(STRIP "${CMAKE_JS_LIB}" CMAKE_JS_LIB) -string(STRIP "${CMAKE_JS_SRC}" CMAKE_JS_SRC) +string(STRIP ${CMAKE_JS_INC} CMAKE_JS_INC) +string(STRIP ${CMAKE_JS_LIB} CMAKE_JS_LIB) +string(STRIP ${CMAKE_JS_SRC} CMAKE_JS_SRC) # Print CMAKE_JS variables message(STATUS "CMake.js configurations: LIB=${CMAKE_JS_LIB}, INC=${CMAKE_JS_INC}, SRC=${CMAKE_JS_SRC}") diff --git a/tools/nodejs_api/README.md b/tools/nodejs_api/README.md index f1189270c2..95978ea6af 100644 --- a/tools/nodejs_api/README.md +++ b/tools/nodejs_api/README.md @@ -7,28 +7,10 @@ A high-performance graph database for knowledge-intensive applications. This Nod ## 📦 Installation -**Node.js version requirement** - -This package **requires Node.js 20 or later**. Older Node.js versions are not supported and installation may fail due to the enforced `engines.node` constraint and native build tooling (`cmake-js` 8.x, `node-addon-api` 8.x). 
- -**From npm (if published):** - ```bash npm install lbug ``` -**From GitHub** (monorepo; the Node package lives in `tools/nodejs_api`): - -- **pnpm** (v9+), subdirectory is supported: - - ```bash - pnpm add lbug@github:LadybugDB/ladybug#path:tools/nodejs_api - ``` - - On install, the package will build the native addon from source (needs CMake and a C++20 compiler). - -- **npm**: no built-in subdirectory install. Either use a **local path** after cloning and building (see [Build and use in other projects](#-build-and-use-in-other-projects-local)), or a tarball from [GitPkg](https://gitpkg.vercel.app/) (e.g. `https://gitpkg.vercel.app/LadybugDB/ladybug/tools/nodejs_api?main`). - --- ## 🚀 Quick Start @@ -82,15 +64,12 @@ main().catch(console.error); ## 📚 API Overview -**Full API reference:** [docs/API.md](docs/API.md) — types, methods, options, errors, and constants. - The `lbug` package exposes the following primary classes: -* **Database** – `new Database(path, bufferPoolSize?, ...)`. Initialize with `init()` / `initSync()` (optional; done on first use). When the file is locked, **async init() retries for up to 5s** (configurable: last ctor arg `openLockRetryMs`; set `0` to fail immediately). Close with `close()`. -* **Connection** – `new Connection(database, numThreads?)`. Run Cypher with `query(statement)` or `prepare(statement)` then `execute(preparedStatement, params)`. Use `transaction(fn)` for a single write transaction, `ping()` for liveness checks. **`getNumNodes(nodeName)`** and **`getNumRels(relName)`** return row counts for node/rel tables. Use `registerStream(name, source, { columns })` to load data from an AsyncIterable via `LOAD FROM name`; `unregisterStream(name)` when done. Configure with `setQueryTimeout(ms)`, `setMaxNumThreadForExec(n)`. -* **QueryResult** – Returned by `query()` / `execute()`. Consume with `getAll()`, `getNext()` / `hasNext()`, **async iteration** (`for await...of`), or **`toStream()`** (Node.js `Readable`). Use **`toString()`** for a string representation (header + rows; useful for debugging). Metadata: `getColumnNames()`, `getColumnDataTypes()`, `getQuerySummary()`. Call `close()` when done (optional if fully consumed). +* **Database** – `new Database(path, bufferPoolSize?, ...)`. Initialize with `init()` / `initSync()` (optional; done on first use). Close with `close()`. +* **Connection** – `new Connection(database, numThreads?)`. Run Cypher with `query(statement)` or `prepare(statement)` then `execute(preparedStatement, params)`. Use `transaction(fn)` for a single write transaction, `ping()` for liveness checks. Configure with `setQueryTimeout(ms)`, `setMaxNumThreadForExec(n)`. +* **QueryResult** – Returned by `query()` / `execute()`. Consume with `getAll()`, `getNext()` / `hasNext()`, **async iteration** (`for await...of`), or **`toStream()`** (Node.js `Readable`). Metadata: `getColumnNames()`, `getColumnDataTypes()`, `getQuerySummary()`. Call `close()` when done (optional if fully consumed). * **PreparedStatement** – Created by `conn.prepare(statement)`. Execute with `conn.execute(preparedStatement, params)`. Reuse for parameterized queries. -* **Pool** – `createPool({ databasePath, maxSize, ... })` returns a connection pool. Use **`pool.run(conn => ...)`** (recommended) or `acquire()` / `release(conn)`; call **`pool.close()`** when done. Both CommonJS (`require`) and ES Modules (`import`) are fully supported. @@ -116,59 +95,8 @@ for await (const row of result) { // Option 4: Node.js Readable stream (e.g. 
for .pipe()) const stream = result.toStream(); stream.on("data", (row) => console.log(row)); - -// Option 5: string representation (e.g. for debugging) -console.log(result.toString()); -``` - -### Table counts - -After creating node/rel tables and loading data, you can get row counts: - -```js -conn.initSync(); // or await conn.init() -const numUsers = conn.getNumNodes("User"); -const numFollows = conn.getNumRels("Follows"); ``` -### Connection pool - -Use **`createPool(options)`** to get a pool of connections (one shared `Database`, up to `maxSize` connections). Prefer **`pool.run(fn)`**: it acquires a connection, runs `fn(conn)`, and releases in `finally` (on success or throw), so you never leak a connection. - -**Options:** `maxSize` (required), `databasePath`, `databaseOptions` (same shape as `Database` constructor), `minSize` (default 0), `acquireTimeoutMillis` (default 0 = wait forever), `validateOnAcquire` (default false; if true, `conn.ping()` before hand-out). - -**Example (recommended: `run`):** - -```js -import { createPool } from "lbug"; - -const pool = createPool({ databasePath: "./mydb", maxSize: 10 }); - -const rows = await pool.run(async (conn) => { - const result = await conn.query("MATCH (u:User) RETURN u.name LIMIT 5"); - const rows = await result.getAll(); - result.close(); - return rows; -}); -console.log(rows); - -await pool.close(); -``` - -**Manual acquire/release:** If you need the same connection for multiple operations, use `acquire()` and always call `release(conn)` in a `finally` block so the connection is returned even on throw. - -```js -const conn = await pool.acquire(); -try { - await conn.query("..."); - // ... -} finally { - pool.release(conn); -} -``` - -When shutting down, call **`pool.close()`**: it rejects new and pending `acquire()`, then closes all connections and the database. - ### Transactions **Manual:** Run `BEGIN TRANSACTION`, then your queries, then `COMMIT` or `ROLLBACK`. On error, call `ROLLBACK` before continuing. @@ -193,75 +121,6 @@ await conn.transaction(async () => { }); ``` -### Loading data from a Node.js stream - -You can feed data from an **AsyncIterable** (generator, async generator, or any `Symbol.asyncIterator`) into Cypher using **scan replacement**: register a stream by name, then use `LOAD FROM name` in your query. Rows are pulled from JavaScript on demand during execution. - -**API:** - -* **`conn.registerStream(name, source, options)`** (async) - * `name` – string used in Cypher: `LOAD FROM name RETURN ...` - * `source` – AsyncIterable of rows. Each row is an **array** of column values (same order as `options.columns`) or an **object** keyed by column name. - * `options.columns` – **required**. Schema: array of `{ name: string, type: string }`. Supported types: `INT64`, `INT32`, `INT16`, `INT8`, `UINT64`, `UINT32`, `DOUBLE`, `FLOAT`, `STRING`, `BOOL`, `DATE`, `TIMESTAMP`. - -* **`conn.unregisterStream(name)`** - Unregisters the source so the name can be reused or to avoid leaving stale entries. Call after the query (or when done with the stream). - -**Example:** - -```js -async function* generateRows() { - yield [1, "Alice"]; - yield [2, "Bob"]; - yield [3, "Carol"]; -} - -await conn.registerStream("users", generateRows(), { - columns: [ - { name: "id", type: "INT64" }, - { name: "name", type: "STRING" }, - ], -}); - -const result = await conn.query("LOAD FROM users RETURN *"); -for await (const row of result) { - console.log(row); // { id: 1, name: "Alice" }, ... 
-} - -conn.unregisterStream("users"); -``` - -You can combine the stream with other Cypher: e.g. `LOAD FROM stream RETURN * WHERE col > 0`, or `COPY MyTable FROM (LOAD FROM stream RETURN *)`. - -### Database locked - -Only one process can open the same database path for writing. If the file is already locked, **async `init()` retries for up to 5 seconds** by default (grace period), then throws. You can tune or disable this: - -- **Default**: `new Database("./my.db")` — last ctor arg `openLockRetryMs` defaults to `5000` (retry for up to 5s on lock). -- **No retry**: `new Database("./my.db", 0, true, false, 0, true, -1, true, true, 0)` or pass `openLockRetryMs = 0` as the 10th argument to fail immediately. -- **Longer grace**: e.g. `openLockRetryMs = 3000` to wait up to 3s. - -The error has **`code === 'LBUG_DATABASE_LOCKED'`** so you can catch and handle it if the grace period wasn’t enough: - -```js -import { Database, Connection, LBUG_DATABASE_LOCKED } from "lbug"; - -const db = new Database("./my.db"); // already retries ~5s on lock -try { - await db.init(); -} catch (err) { - if (err.code === LBUG_DATABASE_LOCKED) { - console.error("Database still locked after grace period."); - } - throw err; -} -const conn = new Connection(db); -``` - -Use **read-only** mode for concurrent readers: `new Database(path, undefined, undefined, true)` so multiple processes can open the same DB for read. - -See [docs/database_locked.md](docs/database_locked.md) for how other systems handle this and best practices. - --- ## 🛠️ Local Development (for Contributors) @@ -280,100 +139,10 @@ npm run build ### Run Tests -See [docs/nodejs_testing.md](docs/nodejs_testing.md) for guidelines on writing and reviewing tests. - ```bash npm test ``` -When developing from the **monorepo root**, build the native addon first so tests see the latest C++ code: - -```bash -# From repo root (D:\prj\ladybug or similar) -make nodejs -# Or: cmake --build build/release --target lbugjs -# Then from tools/nodejs_api: -cd tools/nodejs_api && npm test -``` - ---- - -## 🔧 Build and use in other projects (local) - -To use the Node.js API from the Ladybug repo in another project without publishing to npm: - -1. **Build the addon** (from the Ladybug repo root): - - ```bash - make nodejs - ``` - - Or from this directory: - - ```bash - npm run build - ``` - - This compiles the native addon into `build/lbugjs.node` and copies JS and types. - -2. **In your other project**, add a file dependency in `package.json`: - - ```json - "dependencies": { - "lbug": "file:../path/to/ladybug/tools/nodejs_api" - } - ``` - - Then run `npm install`. After that, `require("lbug")` or `import ... from "lbug"` will use your local build. - -3. **Optional:** to pack and install a tarball instead: - - ```bash - cd /path/to/ladybug/tools/nodejs_api - npm run build - npm pack - ``` - - In the other project: `npm install /path/to/ladybug/tools/nodejs_api/lbug-0.0.1.tgz`. - -### Prebuilt in your fork (install from GitHub without building) - -If you install from GitHub (e.g. `pnpm add lbug@github:user/ladybug#path:tools/nodejs_api`), the package runs `install.js`: if it finds a prebuilt binary, it uses it and does not build from source. To ship a prebuilt in your fork: - -1. **Build once** in your clone (from repo root): - - ```bash - make nodejs - ``` - -2. 
**Create the prebuilt file** (name = `lbugjs-<platform>-<arch>.node`): - - Windows x64: copy `tools/nodejs_api/build/lbugjs.node` → `tools/nodejs_api/prebuilt/lbugjs-win32-x64.node` - Linux x64: `lbugjs-linux-x64.node` - macOS x64: `lbugjs-darwin-x64.node`, arm64: `lbugjs-darwin-arm64.node` - - Example (from repo root). **Windows (PowerShell):** - - ```powershell - New-Item -ItemType Directory -Force -Path tools/nodejs_api/prebuilt - Copy-Item tools/nodejs_api/build/lbugjs.node tools/nodejs_api/prebuilt/lbugjs-win32-x64.node - ``` - - **Linux/macOS:** - - ```bash - mkdir -p tools/nodejs_api/prebuilt - cp tools/nodejs_api/build/lbugjs.node tools/nodejs_api/prebuilt/lbugjs-$(node -p "process.platform")-$(node -p "process.arch").node - ``` - -3. **Commit and push** the `prebuilt/` folder. Then anyone (or you in another project) can do: - - ```bash - pnpm add lbug@github:YOUR_USERNAME/ladybug#path:tools/nodejs_api - ``` - - and the addon will be used from prebuilt without a local build. - --- ## 📦 Packaging and Binary Distribution diff --git a/tools/nodejs_api/build.js b/tools/nodejs_api/build.js index b1b93bae43..d326300260 100644 --- a/tools/nodejs_api/build.js +++ b/tools/nodejs_api/build.js @@ -3,9 +3,6 @@ const path = require("path"); const { execSync } = require("child_process"); const SRC_PATH = path.resolve(__dirname, "../.."); -const NODEJS_API = path.resolve(__dirname, "."); -const BUILD_DIR = path.join(NODEJS_API, "build"); -const SRC_JS_DIR = path.join(NODEJS_API, "src_js"); const THREADS = require("os").cpus().length; console.log(`Using ${THREADS} threads to build Lbug.`); @@ -15,19 +12,3 @@ execSync(`make nodejs NUM_THREADS=${THREADS}`, { cwd: SRC_PATH, stdio: "inherit", }); - -// Ensure build/ has latest JS from src_js (CMake copies at configure time only) -if (fs.existsSync(SRC_JS_DIR) && fs.existsSync(BUILD_DIR)) { - const files = fs.readdirSync(SRC_JS_DIR); - for (const name of files) { - if (name.endsWith(".js") || name.endsWith(".mjs") || name.endsWith(".d.ts")) { - fs.copyFileSync(path.join(SRC_JS_DIR, name), path.join(BUILD_DIR, name)); - } - } - // So package root has types when used as file: dependency - const dts = path.join(BUILD_DIR, "lbug.d.ts"); - if (fs.existsSync(dts)) { - fs.copyFileSync(dts, path.join(NODEJS_API, "lbug.d.ts")); - } - console.log("Copied src_js to build."); -} diff --git a/tools/nodejs_api/copy_src_to_build.js b/tools/nodejs_api/copy_src_to_build.js deleted file mode 100644 index 8ebe1581c4..0000000000 --- a/tools/nodejs_api/copy_src_to_build.js +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copies src_js/*.js, *.mjs, *.d.ts into build/ so tests run with the latest JS - * after "make nodejs" (which only copies at cmake configure time). - * Run from tools/nodejs_api.
- */ -const fs = require("fs"); -const path = require("path"); - -const srcDir = path.join(__dirname, "src_js"); -const buildDir = path.join(__dirname, "build"); - -if (!fs.existsSync(buildDir)) { - console.warn("copy_src_to_build: build/ missing, run make nodejs first."); - process.exit(0); -} - -const re = /\.(js|mjs|d\.ts)$/; -const files = fs.readdirSync(srcDir).filter((n) => re.test(n)); -for (const name of files) { - fs.copyFileSync(path.join(srcDir, name), path.join(buildDir, name)); -} -console.log("Copied", files.length, "files from src_js to build."); diff --git a/tools/nodejs_api/docs/API.md b/tools/nodejs_api/docs/API.md deleted file mode 100644 index f7d22a4da2..0000000000 --- a/tools/nodejs_api/docs/API.md +++ /dev/null @@ -1,319 +0,0 @@ -# Ladybug Node.js API Reference - -Detailed API documentation for the `lbug` package. For installation, quick start, and usage patterns see [README.md](../README.md). - ---- - -## Module exports - -**CommonJS:** - -```js -const lbug = require("lbug"); -// or -const { Database, Connection, PreparedStatement, QueryResult, createPool, Pool, LBUG_DATABASE_LOCKED, VERSION, STORAGE_VERSION } = require("lbug"); -``` - -**ES Modules:** - -```js -import lbug from "lbug"; -// or -import { Database, Connection, PreparedStatement, QueryResult, createPool, Pool, LBUG_DATABASE_LOCKED, VERSION, STORAGE_VERSION } from "lbug"; -``` - -| Export | Description | -|--------|-------------| -| `Database` | Database instance (path, options). | -| `Connection` | Connection to a database; runs Cypher and manages streams. | -| `PreparedStatement` | Prepared Cypher statement (from `Connection.prepare`). | -| `QueryResult` | Result of `query()` / `execute()`; async iterable, stream, getAll, etc. | -| `createPool` | Factory: `createPool(options)` → `Pool`. | -| `Pool` | Connection pool (use `createPool`, not `new Pool`). | -| `LBUG_DATABASE_LOCKED` | Error code string when DB file is locked. | -| `VERSION` | Library version string. | -| `STORAGE_VERSION` | Storage version (bigint). | - ---- - -## Types (TypeScript / JSDoc) - -### Value types - -| Type | Description | -|------|-------------| -| `Nullable` | `T \| null` | -| `Callback` | `(error: Error \| null, result?: T) => void` | -| `ProgressCallback` | `(pipelineProgress, numPipelinesFinished, numPipelines) => void` | -| `QueryOptions` | `{ signal?: AbortSignal; progressCallback?: ProgressCallback }` | -| `NodeID` | `{ offset: number; table: number }` | -| `NodeValue` | `{ _label: string \| null; _id: NodeID \| null; [key: string]: any }` | -| `RelValue` | `{ _src, _dst, _label, _id; [key: string]: any }` | -| `RecursiveRelValue` | `{ _nodes: any[]; _rels: any[] }` | -| `LbugValue` | `null \| boolean \| number \| bigint \| string \| Date \| NodeValue \| RelValue \| RecursiveRelValue \| LbugValue[] \| { [key: string]: LbugValue }` | - -### Config types - -| Type | Description | -|------|-------------| -| `SystemConfig` | Database options (bufferPoolSize, enableCompression, readOnly, maxDBSize, autoCheckpoint, checkpointThreshold). | -| `PoolDatabaseOptions` | Same shape as Database constructor options (no path): bufferManagerSize, enableCompression, readOnly, maxDBSize, autoCheckpoint, checkpointThreshold, throwOnWalReplayFailure, enableChecksums, openLockRetryMs. | -| `PoolOptions` | databasePath?, databaseOptions?, minSize?, **maxSize**, acquireTimeoutMillis?, validateOnAcquire?. | -| `QuerySummary` | `{ compilingTime: number; executionTime: number }` (milliseconds). 
| - ---- - -## Database - -In-process database instance. One database can be shared by multiple `Connection` instances (e.g. in a pool). - -### Constructor - -```ts -new Database( - databasePath?: string, // default ":memory:" - bufferManagerSize?: number, // default 0 - enableCompression?: boolean, // default true - readOnly?: boolean, // default false - maxDBSize?: number, // default 0 - autoCheckpoint?: boolean, // default true - checkpointThreshold?: number, // default -1 - throwOnWalReplayFailure?: boolean, // default true - enableChecksums?: boolean, // default true - openLockRetryMs?: number // default 5000; 0 = fail immediately on lock -) -``` - -- **databasePath**: `":memory:"` or path to directory. Empty/undefined → `":memory:"`. -- **openLockRetryMs**: Only for async `init()`. Retry opening for up to this many ms when file is locked. Ignored for `:memory:`. - -### Instance methods - -| Method | Returns | Description | -|--------|---------|-------------| -| `init()` | `Promise` | Initialize DB (optional; done on first use). Retries on lock for up to `openLockRetryMs`. | -| `initSync()` | `void` | Initialize synchronously; blocks. No retry on lock. | -| `close()` | `Promise` | Close and release resources. | -| `closeSync()` | `void` | Close synchronously. | - -### Static methods - -| Method | Returns | Description | -|--------|---------|-------------| -| `Database.getVersion()` | `string` | Library version. | -| `Database.getStorageVersion()` | `number` | Storage version. | - -### Errors - -- Lock errors on init are normalized to `Error` with `code === LBUG_DATABASE_LOCKED`. See [database_locked.md](database_locked.md). - ---- - -## Connection - -Connection to a `Database`. Use for queries, prepared statements, transactions, streams, and metadata. - -### Constructor - -```ts -new Connection(database: Database, numThreads?: number) -``` - -- **numThreads**: Max threads for query execution. Can be set later with `setMaxNumThreadForExec(numThreads)`. - -### Initialization - -| Method | Returns | Description | -|--------|---------|-------------| -| `init()` | `Promise` | Initialize connection (optional; done on first query). | -| `initSync()` | `void` | Initialize synchronously; may block. | - -### Query execution - -| Method | Returns | Description | -|--------|---------|-------------| -| `query(statement, optionsOrProgressCallback?)` | `Promise` | Execute Cypher. Options: `{ signal?, progressCallback? }`. Rejects with `AbortError` if `signal` aborted. | -| `querySync(statement)` | `QueryResult \| QueryResult[]` | Execute synchronously; blocks. | -| `prepare(statement)` | `Promise` | Prepare a statement. | -| `prepareSync(statement)` | `PreparedStatement` | Prepare synchronously. | -| `execute(preparedStatement, params?, optionsOrProgressCallback?)` | `Promise` | Execute prepared statement with `params` object. Same options as `query`. | -| `executeSync(preparedStatement, params?)` | `QueryResult \| QueryResult[]` | Execute prepared statement synchronously. | - -**params**: Plain object, e.g. `{ name: "Alice", age: 30 }`. Keys must match parameter names in the prepared Cypher. - -### Transaction - -| Method | Returns | Description | -|--------|---------|-------------| -| `transaction(fn)` | `Promise` | Run `fn()` in a single write transaction. `BEGIN TRANSACTION` → fn() → `COMMIT` on success, `ROLLBACK` on throw. 
| - -### Configuration and control - -| Method | Returns | Description | -|--------|---------|-------------| -| `setMaxNumThreadForExec(numThreads)` | `void` | Max threads for execution. | -| `setQueryTimeout(timeoutInMs)` | `void` | Query timeout in ms; queries aborted after this. | -| `interrupt()` | `void` | Interrupt current query on this connection. No-op if none running. | - -### Metadata and health - -| Method | Returns | Description | -|--------|---------|-------------| -| `ping()` | `Promise` | Liveness check; rejects if connection broken. | -| `explain(statement)` | `Promise` | Run EXPLAIN on Cypher; returns plan string (one row per line). | -| `getNumNodes(nodeName)` | `number` | Count of nodes in node table. Connection must be initialized. | -| `getNumRels(relName)` | `number` | Count of relationships in rel table. | - -### Stream source (LOAD FROM) - -| Method | Returns | Description | -|--------|---------|-------------| -| `registerStream(name, source, options)` | `Promise` | Register AsyncIterable as `LOAD FROM name`. **options.columns** required: `[{ name, type }]`. Types: INT64, INT32, INT16, INT8, UINT64, UINT32, DOUBLE, FLOAT, STRING, BOOL, DATE, TIMESTAMP. | -| `unregisterStream(name)` | `void` | Unregister stream by name. | - -**source**: AsyncIterable of rows; each row is an array (column order) or object (column names). - -### Lifecycle - -| Method | Returns | Description | -|--------|---------|-------------| -| `close()` | `Promise` | Close connection. | -| `closeSync()` | `void` | Close synchronously. | - ---- - -## PreparedStatement - -Created by `Connection.prepare()` / `Connection.prepareSync()`. Do not construct directly. - -### Instance methods - -| Method | Returns | Description | -|--------|---------|-------------| -| `isSuccess()` | `boolean` | Whether preparation succeeded. | -| `getErrorMessage()` | `string` | Error message if preparation failed. | - -Execution is via `conn.execute(preparedStatement, params)` or `conn.executeSync(preparedStatement, params)`. If `!isSuccess()`, `execute` rejects with `getErrorMessage()`. - ---- - -## QueryResult - -Returned by `Connection.query()`, `Connection.querySync()`, `Connection.execute()`, `Connection.executeSync()`. Implements `AsyncIterable | null>`. - -### Consumption (pick one style) - -| Method / usage | Returns | Description | -|----------------|---------|-------------| -| `getAll()` | `Promise` | All rows (loads into memory). | -| `getAllSync()` | `Record[]` | All rows synchronously. | -| `getNext()` | `Promise` | Next row; null when exhausted. | -| `getNextSync()` | `Record \| null` | Next row synchronously. | -| `hasNext()` | `boolean` | Whether more rows exist. | -| `for await (const row of result)` | — | Async iteration; no full materialization. | -| `toStream()` | `stream.Readable` | Node.js Readable (object mode), one row per chunk. | -| `each(resultCb, doneCb, errorCb)` | `void` | Callback-based iteration. | -| `all(resultCb, errorCb)` | `void` | Callback with all rows. | -| `toString()` | `string` | Header + rows (or error message for failed query). | - -### Metadata - -| Method | Returns | Description | -|--------|---------|-------------| -| `getNumTuples()` | `number` | Number of rows. | -| `getColumnNames()` | `Promise` | Column names. | -| `getColumnNamesSync()` | `string[]` | Column names synchronously. | -| `getColumnDataTypes()` | `Promise` | Column data types. | -| `getColumnDataTypesSync()` | `string[]` | Column types synchronously. 
| -| `getQuerySummary()` | `Promise` | `{ compilingTime, executionTime }` in ms. | -| `getQuerySummarySync()` | `QuerySummary` | Same, synchronously. | - -### Other - -| Method | Returns | Description | -|--------|---------|-------------| -| `resetIterator()` | `void` | Reset cursor to start (for re-iteration). | -| `close()` | `void` | Release resources. Optional if fully consumed. | - -**Multiple results**: A batch of statements can return `QueryResult[]`. Single statement returns one `QueryResult`. - ---- - -## Pool and createPool - -Connection pool: one shared `Database`, up to `maxSize` `Connection` instances. - -### createPool(options) - -```ts -function createPool(options: PoolOptions): Pool -``` - -**PoolOptions:** - -| Option | Type | Default | Description | -|--------|------|---------|-------------| -| `databasePath` | string | `":memory:"` | DB path. | -| `databaseOptions` | PoolDatabaseOptions | — | Same shape as Database constructor (no path). | -| `minSize` | number | 0 | Minimum connections to keep. | -| `maxSize` | number | **required** | Maximum connections. | -| `acquireTimeoutMillis` | number | 0 | Max wait for acquire (0 = wait forever). | -| `validateOnAcquire` | boolean | false | If true, call `conn.ping()` before handing out. | - -### Pool methods - -| Method | Returns | Description | -|--------|---------|-------------| -| `acquire()` | `Promise` | Get a connection; **must** call `release(conn)` when done. | -| `release(conn)` | `void` | Return connection to pool. | -| `run(fn)` | `Promise` | Acquire, run `fn(conn)`, release in `finally`. Preferred over manual acquire/release. | -| `close()` | `Promise` | Reject new/pending acquire; close all connections and database. | - -**Example:** - -```js -const pool = createPool({ databasePath: "./mydb", maxSize: 10 }); -const rows = await pool.run(async (conn) => { - const result = await conn.query("MATCH (u:User) RETURN u.name LIMIT 5"); - const rows = await result.getAll(); - result.close(); - return rows; -}); -await pool.close(); -``` - ---- - -## Constants - -| Name | Type | Description | -|------|------|-------------| -| `LBUG_DATABASE_LOCKED` | `"LBUG_DATABASE_LOCKED"` | Error code when DB file is locked. Use with `err.code === LBUG_DATABASE_LOCKED`. | -| `VERSION` | string | Library version (same as `Database.getVersion()`). | -| `STORAGE_VERSION` | bigint | Storage version (same as `Database.getStorageVersion()`). | - ---- - -## Query options and cancellation - -- **signal**: Pass `AbortSignal` (e.g. from `AbortController`) in options to cancel `query()` or `execute()`. On abort, the promise rejects with `DOMException` "AbortError". -- **progressCallback**: `(pipelineProgress, numPipelinesFinished, numPipelines) => void`. Optional progress updates during execution. - -Legacy: you can pass a single function as the second argument to `query(statement, progressCallback)` or `execute(ps, params, progressCallback)` instead of an options object. - ---- - -## Error handling - -- **Database lock**: Async `init()` retries for `openLockRetryMs` (default 5s). Then throws with `code === LBUG_DATABASE_LOCKED`. See [database_locked.md](database_locked.md). -- **Abort**: When `options.signal` is aborted, `query`/`execute` reject with `DOMException` "AbortError". -- **Prepared statement**: If `!preparedStatement.isSuccess()`, `execute` rejects with `preparedStatement.getErrorMessage()`. -- **Validation**: Invalid arguments (e.g. non-object params, wrong types) throw `Error` with descriptive messages. 
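A combined sketch of the lock and abort paths described above; the database path, query text, and 1 s timeout are placeholders, while `LBUG_DATABASE_LOCKED`, the `signal` option, and the `AbortError` rejection are the documented behavior:

```js
import { Database, Connection, LBUG_DATABASE_LOCKED } from "lbug";

const db = new Database("./my.db"); // async init() retries on lock for up to openLockRetryMs
const conn = new Connection(db);
try {
  await db.init();
} catch (err) {
  if (err.code === LBUG_DATABASE_LOCKED) {
    console.error("Still locked after the grace period; is another writer open?");
  }
  throw err;
}

// Cancel a long-running query via AbortSignal.
const ac = new AbortController();
const timer = setTimeout(() => ac.abort(), 1000);
try {
  await conn.query("MATCH (a)-[r]->(b) RETURN count(*)", { signal: ac.signal });
} catch (err) {
  if (err.name !== "AbortError") throw err; // aborted queries reject with DOMException "AbortError"
  console.log("Query cancelled.");
} finally {
  clearTimeout(timer);
}
await db.close();
```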
- ---- - -## Related docs - -- [README.md](../README.md) — Installation, quick start, transactions, stream loading, pool usage, prebuilt binaries. -- [database_locked.md](database_locked.md) — Lock behavior, retry, read-only, best practices. -- [execution_chain_analysis.md](execution_chain_analysis.md) — LOAD FROM stream execution chain (for implementers). diff --git a/tools/nodejs_api/docs/database_locked.md b/tools/nodejs_api/docs/database_locked.md deleted file mode 100644 index 164adc3f9c..0000000000 --- a/tools/nodejs_api/docs/database_locked.md +++ /dev/null @@ -1,40 +0,0 @@ -# Database locked - -## When it happens - -The database file is locked when: - -- Another process has already opened the same path for read-write (e.g. another Node app, the Ladybug shell, or a backup). -- You open the same path twice in one process (e.g. two `Database` instances to the same path) and both try to write. - -Opening is done at the first use: `db.init()`, `db.initSync()`, or the first `conn.query()` on that database. If the OS file lock cannot be acquired, the native layer throws and the Node API surfaces it as an **Error with `code === 'LBUG_DATABASE_LOCKED'`**. - -## How other systems handle it - -| System | Approach | -|----------|----------| -| **SQLite** | `busy_timeout` (e.g. 5 seconds): block until lock is released or timeout, then return `SQLITE_BUSY`. Apps often retry with exponential backoff. | -| **DuckDB** | Open fails immediately if locked; application retries with backoff. | -| **LMDB** | Single writer; readers use `MDB_NOLOCK` or shared lock. Writers get exclusive lock. | -| **RocksDB** | Options for concurrent access; single process or client–server. | - -Common patterns: - -1. **Fail fast** — return a clear error (e.g. “database locked”) so the app can show a message or retry. -2. **Retry with backoff** — in application code: catch the error, wait (e.g. 50 ms, 100 ms, 200 ms), try again, then give up. -3. **Block with timeout** — wait up to N ms for the lock (requires support in the engine; Ladybug currently uses “fail immediately”). -4. **Read-only for readers** — open in read-only mode so multiple processes can read; only one writer. - -## What the Node API does - -- **Grace period (async init only)**: When you open a database with async `init()` (or the first `query()`), the driver **retries for up to 5 seconds** by default if the file is locked. So short-lived contention (e.g. MCP server or another tool briefly holding the lock) often succeeds without you doing anything. Configure with the last constructor argument `openLockRetryMs` (default `5000`; set `0` to fail immediately). -- **Clear error**: After the grace period or when retry is disabled, you get an Error whose message includes “Could not set lock on file” and a link to the concurrency docs. -- **Error code**: The error is normalized so `err.code === 'LBUG_DATABASE_LOCKED'`. You can import `LBUG_DATABASE_LOCKED` from `lbug` and catch it if you need custom retry or messaging. -- **Sync init**: `initSync()` does not retry; it fails immediately on lock (no blocking wait in the driver). - -## Best practices - -1. **One writer per path** — avoid opening the same on-disk database for write from more than one process at a time. -2. **Concurrent readers** — use `new Database(path, undefined, undefined, true)` (read-only) so multiple processes can read the same DB. -3. **Retry with backoff** — if you expect short-lived contention (e.g. restart or another tool), catch `LBUG_DATABASE_LOCKED`, wait, and retry a few times. -4. 
**Close when done** — call `db.close()` so the lock is released for other processes. diff --git a/tools/nodejs_api/docs/execution_chain_analysis.md b/tools/nodejs_api/docs/execution_chain_analysis.md deleted file mode 100644 index 8e633201bc..0000000000 --- a/tools/nodejs_api/docs/execution_chain_analysis.md +++ /dev/null @@ -1,65 +0,0 @@ -# LOAD FROM stream: Execution Chain (reference & recommendations) - -## Execution chain diagram - -```mermaid -%%{init: {'flowchart': {'defaultRenderer': 'elk', 'elk': {'direction': 'DOWN'}}}}%% -flowchart TB - subgraph JS["JS (main thread)"] - direction TB - A["query('LOAD FROM name RETURN *')"] - B["registerStream: getChunk(requestId) → pending.push; runConsumer via setImmediate"] - C["runConsumer: sort pending, for each id take it.next(), returnChunk(id, rows, done)"] - D["AsyncIterator: it.next() → yield rows"] - A --> B - B --> C - C --> D - end - - subgraph CppAddon["C++ addon (Node worker thread)"] - direction TB - E["tableFunc: mutex, nextRequestId(), setChunkRequest, BlockingCall(getChunk)"] - F["wait reqPtr->cv until filled"] - G["returnChunkFromJS: req->rows, filled=true, cv.notify_one"] - H["Copy rows to output.dataChunk, return cap"] - E --> F - G --> F - F --> H - end - - subgraph Engine["Engine (single task thread, canParallelFunc=false)"] - direction TB - I["getNextTuple → getNextTuplesInternal"] - J["tableFunc(input, output) → numTuplesScanned"] - K["FactorizedTable accumulates chunks"] - L["MaterializedQueryResult + FactorizedTableIterator"] - I --> J - J --> K - K --> L - end - - J --> E - E --> B - C --> G - H --> J - L --> M["JS hasNext / getNext"] - M --> A -``` - ---- - -## Useful observations - -- **Order**: With `canParallelFunc = false`, one engine thread calls `tableFunc` sequentially. Request IDs are assigned under mutex; JS `runConsumer` sorts `pending` and serves chunks by `requestId`, so iterator order is preserved. -- **End of stream**: Engine calls `tableFunc` until it returns 0. JS sends `returnChunk(id, [], true)` when the iterator is done; C++ returns 0 and the engine stops. No extra call after 0. -- **getNext contract**: Core `getNext()` throws if `!hasNext()`. Addon always checks `hasNext()` before `getNext()` and returns `null` when exhausted so that JS API matches `getNext(): Promise`. - ---- - -## Recommendations for the future - -1. **Keep `canParallelFunc = false`** for the node stream table function. Enabling parallelism would require a deterministic merge of chunks by requestId on the engine side; until then, single-thread keeps order and avoids subtle bugs. -2. **Any new code path that reads rows** (e.g. another language binding or helper) must guard with `hasNext()` before `getNext()`; core will throw otherwise. -3. **Mutex in `tableFunc`**: Currently redundant with single-thread execution but harmless. If parallelism is ever introduced, either remove the mutex and solve ordering in the engine or keep it and document that the stream source is intentionally serialized. -4. **Tests**: Prefer iterating with `hasNext()` + `getNext()` and asserting `getNext()` returns `null` exactly when `hasNext()` becomes false, to lock the contract (see `test_query_result.js`). -5. **Rebuild and full test run** (e.g. `register_stream` + `query_result`) after any change in the addon or engine table function path. 
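A sketch of the pattern recommendation 4 asks for: drain a result with `hasNext()` + `getNext()` and pin the end-of-stream contract. The `UNWIND` query is illustrative, not lifted from `test_query_result.js`; the `hasNext()`/`getNext()` semantics are those described above.

```js
import assert from "node:assert";
import { Database, Connection } from "lbug";

const db = new Database(":memory:");
const conn = new Connection(db);
const result = await conn.query("UNWIND [1, 2, 3] AS x RETURN x");

const rows = [];
while (result.hasNext()) {
  rows.push(await result.getNext());
}
assert.strictEqual(rows.length, 3);
// Contract: once hasNext() is false, getNext() resolves to null (it must not throw).
assert.strictEqual(result.hasNext(), false);
assert.strictEqual(await result.getNext(), null);

result.close();
await db.close();
```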
diff --git a/tools/nodejs_api/docs/nodejs_testing.md b/tools/nodejs_api/docs/nodejs_testing.md deleted file mode 100644 index 5016e8abad..0000000000 --- a/tools/nodejs_api/docs/nodejs_testing.md +++ /dev/null @@ -1,66 +0,0 @@ -# Node.js API — Testing Guide - -Guidelines for writing and reviewing tests for the Node.js API (`tools/nodejs_api/`). Use this when adding or changing tests to keep the suite correct, isolated, and maintainable. - ---- - -## 1. Assertions and Oracles - -- **Strict equality:** The test shim’s `assert.equal(a, b)` is `strictEqual`. A number and a string (e.g. `1234` vs `'1234'`) are not equal. When the API or DB returns strings (e.g. `current_setting()`), coerce before comparing: `Number(tuple["checkpoint_threshold"]) === 1234`, or compare to the expected string. -- **Floating point:** Use `assert.approximately(actual, expected, delta)` for FLOAT/DOUBLE. Choose a small delta (e.g. `1e-6`). NaN is not considered approximately equal to anything; that is intentional. -- **Arrays in assertions:** In JavaScript, `(a, b)` is the comma operator and evaluates to `b`. Never write `assert.deepEqual(x, [(10, 8)])` — that compares `x` to `[8]`. Use `assert.deepEqual(x, [10, 8])`. -- **API return types:** If the API can return either a value or a list (e.g. single value vs array), assert the actual shape (e.g. `assert.deepEqual(result["usedNames"], ["Aida"])` if the API returns an array). -- **Naming:** Use clear variable names (e.g. `expectedResultArr` instead of typo-prone names) so assertions stay readable. - ---- - -## 2. Test Isolation and Shared State - -- **Assert the right object:** If the test creates its own database or connection (e.g. `testDb`, `testConn`), run config or data checks **against that instance**, not the global `db`/`conn`. Using global `conn` in a test that built `testDb` checks the wrong database and is a logic bug. -- **Prefer a dedicated connection for local DBs:** When testing options of a newly created database, create a connection to that database, run the query, then close both the connection and the database. -- **Close what you open:** If a test creates a connection or database, close it in the same test (or in a reliable `finally`/hook). Leaving connections or databases open can leak handles and affect other tests or the process. -- **Shared fixtures:** Tests that use the global `db`/`conn` from `before()` are fine for read-only or shared-scenario tests; just don’t use them to verify state of a different, locally created DB/conn. - ---- - -## 3. Data Types and Boundaries - -- **Settings as strings:** `current_setting()` returns strings (e.g. `'1234'`, `'False'`). For numeric checks use `Number(...)`; for booleans compare to the string the backend returns (e.g. `"False"`). -- **Large integers:** Values above `Number.MAX_SAFE_INTEGER` (2^53) can lose precision in JavaScript. For UINT64/INT64 round-trip tests with very large values, a short comment is helpful (e.g. that values > 2^53 may be lossy in JS). -- **Column names:** Tests that depend on exact column names (e.g. from `RETURN CAST($1, 'UINT64')`) will break if the backend changes display names. Prefer stable API contracts when possible; otherwise document the dependency. - ---- - -## 4. Concurrency and Timing - -- **Time-based races:** Tests that close a connection or DB after a short delay (e.g. 80 ms) then assert “query rejects” can be flaky on slow CI. Use a timeout (e.g. 2 s) so the test fails fast if the query never rejects, and consider slightly longer delays on CI if needed. 
-- **Node.js test runner timeout:** For long-running tests (e.g. interrupt), set timeout via the test option: `it("...", { timeout: 5000 }, async function () { ... })`. The test context in `node:test` does not provide `this.timeout()`. -- **Concurrent queries:** When running multiple queries in parallel on the same connection, assert results against known stable data (e.g. fixed IDs) and avoid shared mutable state. - ---- - -## 5. Error Messages and API Contracts - -- **Exact vs partial match:** `assert.equal(e.message, "exact string")` is brittle if the backend changes wording. For stability, prefer `assert.include(e.message, keyPhrase)` or similar when the exact text is not part of the public API contract. -- **Resilience tests:** Checking that the error message contains “closed” or “not allowed” is a good balance between stability and coverage. - ---- - -## 6. Resource Lifecycle and Cleanup - -- **Databases and connections:** Every database or connection created in a test should be closed in that test (or in a `finally`/hook that always runs). This includes “positive” tests (e.g. “should create a database with valid path and no buffer size”). -- **Query results:** Prefer calling `res.close()` when a test opens many results (e.g. concurrency) or when the test is long-lived. Relying on GC alone can hide leaks. -- **Temp directories:** Use a helper (e.g. `withTempDb`) that creates a temp DB/conn, runs the test, and in `finally` closes them and removes the temp path. Avoid leaving temp dirs or DBs open. -- **process.exit(0):** If the test runner uses `process.exit(0)` in `after()` to avoid the event loop hanging (e.g. due to the native addon), document it; it can mask unclosed resources, so use only when necessary. - ---- - -## 7. Validation Checklist - -Before submitting test changes: - -- [ ] Run the full suite: `npm test` (from `tools/nodejs_api/`). -- [ ] If you test against an installed package, run with `TEST_INSTALLED=1` as applicable. -- [ ] For tests that create a local DB or connection, ensure config/data assertions use that instance, not the global `db`/`conn`. -- [ ] Ensure no comma-operator traps in assertions: no `(a, b)` used as an array element in `deepEqual`/`equal`. -- [ ] All resources (DB, connection) created in the test are closed in the same test or a guaranteed cleanup path. diff --git a/tools/nodejs_api/examples/README.md b/tools/nodejs_api/examples/README.md deleted file mode 100644 index 63ace4baf5..0000000000 --- a/tools/nodejs_api/examples/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Examples - -Run from `tools/nodejs_api` (after `make nodejs` or `npm run build`): - -```bash -node examples/quickstart.mjs -node examples/stream-load.mjs -``` - -- **quickstart.mjs** — In-memory DB, create table, load data from a stream via `COPY FROM (LOAD FROM ...)`, then query. -- **stream-load.mjs** — Register an async iterable and consume it with `LOAD FROM name RETURN *`. diff --git a/tools/nodejs_api/examples/quickstart.mjs b/tools/nodejs_api/examples/quickstart.mjs deleted file mode 100644 index d78ec07f9a..0000000000 --- a/tools/nodejs_api/examples/quickstart.mjs +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Quickstart: in-memory database, create schema, load from stream, query. 
- * Run from tools/nodejs_api: node examples/quickstart.mjs - */ -import { Database, Connection } from "lbug"; - -async function* userRows() { - yield ["Alice", 30]; - yield ["Bob", 25]; -} - -const db = new Database(":memory:"); -const conn = new Connection(db); - -await conn.query(` - CREATE NODE TABLE User(name STRING, age INT64, PRIMARY KEY (name)); -`); - -await conn.registerStream("users", userRows(), { - columns: [ - { name: "name", type: "STRING" }, - { name: "age", type: "INT64" }, - ], -}); -await conn.query("COPY User FROM (LOAD FROM users RETURN *)"); -conn.unregisterStream("users"); - -const result = await conn.query("MATCH (u:User) RETURN u.name, u.age;"); -const rows = await result.getAll(); -console.log(rows); - -await db.close(); diff --git a/tools/nodejs_api/examples/stream-load.mjs b/tools/nodejs_api/examples/stream-load.mjs deleted file mode 100644 index 9adc84f8a9..0000000000 --- a/tools/nodejs_api/examples/stream-load.mjs +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Load data from a JavaScript async iterable via LOAD FROM. - * Run from tools/nodejs_api: node examples/stream-load.mjs - */ -import { Database, Connection } from "lbug"; - -async function* generateRows() { - yield [1, "Alice"]; - yield [2, "Bob"]; - yield [3, "Carol"]; -} - -const db = new Database(":memory:"); -const conn = new Connection(db); - -await conn.registerStream("users", generateRows(), { - columns: [ - { name: "id", type: "INT64" }, - { name: "name", type: "STRING" }, - ], -}); - -const result = await conn.query("LOAD FROM users RETURN *"); -for await (const row of result) { - console.log(row); -} - -conn.unregisterStream("users"); -await db.close(); diff --git a/tools/nodejs_api/index.js b/tools/nodejs_api/index.js deleted file mode 100644 index 04ea8a1618..0000000000 --- a/tools/nodejs_api/index.js +++ /dev/null @@ -1,4 +0,0 @@ -"use strict"; - -// After `make nodejs` or `npm run build`, entry point is build/ -module.exports = require("./build"); diff --git a/tools/nodejs_api/index.mjs b/tools/nodejs_api/index.mjs deleted file mode 100644 index 54fd3b6774..0000000000 --- a/tools/nodejs_api/index.mjs +++ /dev/null @@ -1,12 +0,0 @@ -export { - default, - Database, - Connection, - PreparedStatement, - QueryResult, - createPool, - Pool, - LBUG_DATABASE_LOCKED, - VERSION, - STORAGE_VERSION, -} from "./build/index.mjs"; diff --git a/tools/nodejs_api/install.js b/tools/nodejs_api/install.js index cbf69ea41d..6a010b335a 100644 --- a/tools/nodejs_api/install.js +++ b/tools/nodejs_api/install.js @@ -8,100 +8,60 @@ const process = require("process"); const isNpmBuildFromSourceSet = process.env.npm_config_build_from_source; const platform = process.platform; const arch = process.arch; - -// Skip when already built (e.g. local dev after make nodejs) -if (fsCallback.existsSync(path.join(__dirname, "build", "lbugjs.node"))) { - process.exit(0); -} - const prebuiltPath = path.join( __dirname, "prebuilt", `lbugjs-${platform}-${arch}.node` ); -const buildDir = path.join(__dirname, "build"); -const srcJsDir = path.join(__dirname, "src_js"); -const lbugSourceDir = path.join(__dirname, "lbug-source"); - // Check if building from source is forced if (isNpmBuildFromSourceSet) { console.log( "The NPM_CONFIG_BUILD_FROM_SOURCE environment variable is set. Building from source." 
); } -// Prebuilt available + git-clone layout (src_js present, no lbug-source): use prebuilt and copy src_js → build/ -else if (fsCallback.existsSync(prebuiltPath) && fsCallback.existsSync(srcJsDir)) { - console.log("Prebuilt binary is available (git clone layout)."); - if (!fsCallback.existsSync(buildDir)) { - fsCallback.mkdirSync(buildDir, { recursive: true }); - } - fs.copyFileSync(prebuiltPath, path.join(buildDir, "lbugjs.node")); - const jsFiles = fs.readdirSync(srcJsDir).filter((file) => { - return file.endsWith(".js") || file.endsWith(".mjs") || file.endsWith(".d.ts"); - }); - for (const file of jsFiles) { - fs.copyFileSync(path.join(srcJsDir, file), path.join(buildDir, file)); - } - console.log("Done! Prebuilt + JS copied to build/."); - process.exit(0); -} -// Prebuilt available + tarball layout (lbug-source present): copy to root (legacy publish flow) +// Check if prebuilt binaries are available else if (fsCallback.existsSync(prebuiltPath)) { console.log("Prebuilt binary is available."); + console.log("Copying prebuilt binary to package directory..."); fs.copyFileSync(prebuiltPath, path.join(__dirname, "lbugjs.node")); - const jsSourceDir = path.join(lbugSourceDir, "tools", "nodejs_api", "src_js"); + console.log( + `Copied ${prebuiltPath} -> ${path.join(__dirname, "lbugjs.node")}.` + ); + console.log("Copying JS files to package directory..."); + const jsSourceDir = path.join( + __dirname, + "lbug-source", + "tools", + "nodejs_api", + "src_js" + ); const jsFiles = fs.readdirSync(jsSourceDir).filter((file) => { return file.endsWith(".js") || file.endsWith(".mjs") || file.endsWith(".d.ts"); }); + console.log("Files to copy: "); + for (const file of jsFiles) { + console.log(" " + file); + } for (const file of jsFiles) { fs.copyFileSync(path.join(jsSourceDir, file), path.join(__dirname, file)); } + console.log("Copied JS files to package directory."); console.log("Done!"); process.exit(0); } else { console.log("Prebuilt binary is not available, building from source..."); } -if (!fsCallback.existsSync(lbugSourceDir)) { - // Full git clone (e.g. CI Windows): no lbug-source; install deps only; build via "make nodejs" from repo root. - const repoRoot = path.join(__dirname, "..", ".."); - const repoCmake = path.join(repoRoot, "CMakeLists.txt"); - if (fsCallback.existsSync(repoCmake)) { - console.log("Full clone layout: installing dependencies only. Run 'make nodejs' from repo root to build."); - const nodeModulesDir = path.join(__dirname, "node_modules"); - const lockFile = path.join(__dirname, "package-lock.json"); - if (fsCallback.existsSync(nodeModulesDir)) { - fsCallback.rmSync(nodeModulesDir, { recursive: true, force: true }); - } - if (fsCallback.existsSync(lockFile)) { - fsCallback.unlinkSync(lockFile); - } - const env = { ...process.env, NPM_CONFIG_IGNORE_SCRIPTS: "true" }; - childProcess.execSync("npm install --ignore-scripts --legacy-peer-deps", { cwd: __dirname, stdio: "inherit", env }); - process.exit(0); - } - console.error( - "lbug-source/ not found (install from git clone). Add prebuilt binary to prebuilt/lbugjs-" + - platform + - "-" + - arch + - ".node and commit, or install from a full clone and build there." - ); - process.exit(1); -} - // Get number of threads const THREADS = os.cpus().length; console.log(`Using ${THREADS} threads to build Lbug.`); -// Install dependencies only; skip install script so nested install.js does not run (no lbug-source there). 
+// Install dependencies console.log("Installing dependencies..."); -const innerNpmEnv = { ...process.env, NPM_CONFIG_IGNORE_SCRIPTS: "true" }; -childProcess.execSync("npm install --ignore-scripts --legacy-peer-deps", { +childProcess.execSync("npm install", { cwd: path.join(__dirname, "lbug-source", "tools", "nodejs_api"), stdio: "inherit", - env: innerNpmEnv, }); // Build the Lbug source code diff --git a/tools/nodejs_api/package.json b/tools/nodejs_api/package.json index 24212114a8..f6ff61a8b3 100644 --- a/tools/nodejs_api/package.json +++ b/tools/nodejs_api/package.json @@ -5,19 +5,14 @@ "main": "index.js", "module": "./index.mjs", "types": "./lbug.d.ts", - "exports": { - ".": { + "exports":{ + ".":{ "require": "./index.js", "import": "./index.mjs", "types": "./lbug.d.ts" } }, - "files": [ - "index.js", - "index.mjs", - "lbug.d.ts", - "lbugjs.node" - ], + "files": ["index.js", "index.mjs", "lbug.d.ts", "lbugjs.node"], "type": "commonjs", "homepage": "https://ladybugdb.com/", "repository": { @@ -25,8 +20,7 @@ "url": "git+https://github.com/LadybugDB/ladybug.git" }, "scripts": { - "install": "node install.js", - "test": "node --test test/test.js --test-timeout=20000", + "test": "mocha test --timeout 20000", "clean": "node clean.js", "clean-all": "node clean.js all", "build": "node build.js", @@ -34,12 +28,13 @@ }, "author": "Ladybug Team", "license": "MIT", - "engines": { - "node": ">=20.0.0" + "devDependencies": { + "chai": "^4.4.1", + "mocha": "^10.4.0", + "tmp": "^0.2.3" }, - "devDependencies": {}, "dependencies": { - "cmake-js": "^8.0.0", - "node-addon-api": "^8.5.0" + "cmake-js": "^7.3.0", + "node-addon-api": "^6.0.0" } -} \ No newline at end of file +} diff --git a/tools/nodejs_api/src_cpp/include/node_connection.h b/tools/nodejs_api/src_cpp/include/node_connection.h index 17c94fa7b0..caacef92c3 100644 --- a/tools/nodejs_api/src_cpp/include/node_connection.h +++ b/tools/nodejs_api/src_cpp/include/node_connection.h @@ -8,7 +8,6 @@ #include "node_prepared_statement.h" #include "node_progress_bar_display.h" #include "node_query_result.h" -#include "node_scan_replacement.h" #include using namespace lbug::main; @@ -31,22 +30,15 @@ class NodeConnection : public Napi::ObjectWrap { void InitCppConnection(); void SetMaxNumThreadForExec(const Napi::CallbackInfo& info); void SetQueryTimeout(const Napi::CallbackInfo& info); - void Interrupt(const Napi::CallbackInfo& info); Napi::Value ExecuteAsync(const Napi::CallbackInfo& info); Napi::Value QueryAsync(const Napi::CallbackInfo& info); Napi::Value ExecuteSync(const Napi::CallbackInfo& info); Napi::Value QuerySync(const Napi::CallbackInfo& info); void Close(const Napi::CallbackInfo& info); - Napi::Value RegisterStream(const Napi::CallbackInfo& info); - void UnregisterStream(const Napi::CallbackInfo& info); - void ReturnChunk(const Napi::CallbackInfo& info); - Napi::Value GetNumNodes(const Napi::CallbackInfo& info); - Napi::Value GetNumRels(const Napi::CallbackInfo& info); private: std::shared_ptr database; std::shared_ptr connection; - std::unique_ptr streamRegistry; }; class ConnectionInitAsyncWorker : public Napi::AsyncWorker { diff --git a/tools/nodejs_api/src_cpp/include/node_query_result.h b/tools/nodejs_api/src_cpp/include/node_query_result.h index 0b07c97b49..b9ee4db979 100644 --- a/tools/nodejs_api/src_cpp/include/node_query_result.h +++ b/tools/nodejs_api/src_cpp/include/node_query_result.h @@ -37,7 +37,6 @@ class NodeQueryResult : public Napi::ObjectWrap { Napi::Value GetColumnNamesSync(const Napi::CallbackInfo& info); Napi::Value 
GetQuerySummarySync(const Napi::CallbackInfo& info); Napi::Value GetQuerySummaryAsync(const Napi::CallbackInfo& info); - Napi::Value GetToStringSync(const Napi::CallbackInfo& info); void PopulateColumnNames(); void Close(const Napi::CallbackInfo& info); void Close(); @@ -103,7 +102,6 @@ class NodeQueryResultGetNextAsyncWorker : public Napi::AsyncWorker { try { if (!nodeQueryResult->queryResult->hasNext()) { cppTuple.reset(); - return; } cppTuple = nodeQueryResult->queryResult->getNext(); } catch (const std::exception& exc) { @@ -114,7 +112,7 @@ class NodeQueryResultGetNextAsyncWorker : public Napi::AsyncWorker { inline void OnOK() override { auto env = Env(); if (cppTuple == nullptr) { - Callback().Call({env.Null(), env.Null()}); + Callback().Call({env.Null(), env.Undefined()}); return; } Napi::Object nodeTuple = Napi::Object::New(env); diff --git a/tools/nodejs_api/src_cpp/include/node_scan_replacement.h b/tools/nodejs_api/src_cpp/include/node_scan_replacement.h deleted file mode 100644 index bd0109005a..0000000000 --- a/tools/nodejs_api/src_cpp/include/node_scan_replacement.h +++ /dev/null @@ -1,60 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -#include "function/table/scan_replacement.h" -#include "function/table/table_function.h" -#include - -namespace lbug { -namespace main { -class Connection; -} -namespace common { -class LogicalType; -} -} - -struct NodeStreamChunkRequest { - std::mutex mtx; - std::condition_variable cv; - std::vector> rows; - std::vector columnNames; // schema order for object rows - bool done = false; - bool filled = false; -}; - -struct NodeStreamSourceState { - Napi::ThreadSafeFunction getChunkTsf; - std::vector columnNames; - std::vector columnTypes; -}; - -class NodeStreamRegistry { -public: - void registerSource(const std::string& name, Napi::ThreadSafeFunction tsf, - std::vector columnNames, - std::vector columnTypes); - void unregisterSource(const std::string& name); - std::vector lookup(const std::string& name) const; - std::unique_ptr replace( - std::span handles) const; - - static NodeStreamChunkRequest* getChunkRequest(uint64_t requestId); - static void setChunkRequest(uint64_t requestId, std::unique_ptr req); - static uint64_t nextRequestId(); - -private: - mutable std::mutex mtx_; - std::unordered_map> sources_; -}; - -void addNodeScanReplacement(lbug::main::Connection* connection, NodeStreamRegistry* registry); - -void returnChunkFromJS(uint64_t requestId, Napi::Array rowsNapi, bool done); diff --git a/tools/nodejs_api/src_cpp/include/node_stream_scan.h b/tools/nodejs_api/src_cpp/include/node_stream_scan.h deleted file mode 100644 index 5c7328b6be..0000000000 --- a/tools/nodejs_api/src_cpp/include/node_stream_scan.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include "function/table/bind_data.h" -#include "function/table/table_function.h" - -struct NodeStreamSourceState; // defined in node_scan_replacement.h - -struct NodeStreamScanFunctionData : lbug::function::TableFuncBindData { - std::shared_ptr state; - - NodeStreamScanFunctionData(lbug::binder::expression_vector columns, - std::shared_ptr state) - : TableFuncBindData(std::move(columns), 0), state(std::move(state)) {} - - std::unique_ptr copy() const override { - return std::make_unique(columns, state); - } -}; - -namespace lbug { -namespace function { - -struct NodeStreamScanFunction { - static constexpr const char* name = "NODE_STREAM_SCAN"; - static TableFunction getFunction(); -}; - -} // namespace function -} // namespace lbug diff 
--git a/tools/nodejs_api/src_cpp/include/node_util.h b/tools/nodejs_api/src_cpp/include/node_util.h index cd27b21371..4ea7a50674 100644 --- a/tools/nodejs_api/src_cpp/include/node_util.h +++ b/tools/nodejs_api/src_cpp/include/node_util.h @@ -10,10 +10,10 @@ class Util { static Napi::Value ConvertToNapiObject(const Value& value, Napi::Env env); static std::unordered_map> TransformParametersForExec( Napi::Array params); - static Value TransformNapiValue(Napi::Value napiValue); private: static Napi::Object ConvertNodeIdToNapiObject(const nodeID_t& nodeId, Napi::Env env); + static Value TransformNapiValue(Napi::Value napiValue); const static int64_t JS_MAX_SAFE_INTEGER = 9007199254740991; const static int64_t JS_MIN_SAFE_INTEGER = -9007199254740991; }; diff --git a/tools/nodejs_api/src_cpp/node_connection.cpp b/tools/nodejs_api/src_cpp/node_connection.cpp index 6ec6b0e65a..404903904f 100644 --- a/tools/nodejs_api/src_cpp/node_connection.cpp +++ b/tools/nodejs_api/src_cpp/node_connection.cpp @@ -1,14 +1,11 @@ #include "include/node_connection.h" -#include #include #include "include/node_database.h" #include "include/node_query_result.h" -#include "include/node_scan_replacement.h" #include "include/node_util.h" #include "main/lbug.h" -#include "main/storage_driver.h" Napi::Object NodeConnection::Init(Napi::Env env, Napi::Object exports) { Napi::HandleScope scope(env); @@ -22,13 +19,7 @@ Napi::Object NodeConnection::Init(Napi::Env env, Napi::Object exports) { InstanceMethod("querySync", &NodeConnection::QuerySync), InstanceMethod("setMaxNumThreadForExec", &NodeConnection::SetMaxNumThreadForExec), InstanceMethod("setQueryTimeout", &NodeConnection::SetQueryTimeout), - InstanceMethod("interrupt", &NodeConnection::Interrupt), - InstanceMethod("close", &NodeConnection::Close), - InstanceMethod("registerStream", &NodeConnection::RegisterStream), - InstanceMethod("unregisterStream", &NodeConnection::UnregisterStream), - InstanceMethod("returnChunk", &NodeConnection::ReturnChunk), - InstanceMethod("getNumNodes", &NodeConnection::GetNumNodes), - InstanceMethod("getNumRels", &NodeConnection::GetNumRels)}); + InstanceMethod("close", &NodeConnection::Close)}); exports.Set("NodeConnection", t); return exports; @@ -66,8 +57,6 @@ void NodeConnection::InitCppConnection() { this->connection = std::make_shared(database.get()); ProgressBar::Get(*connection->getClientContext()) ->setDisplay(std::make_shared()); - streamRegistry = std::make_unique(); - addNodeScanReplacement(connection.get(), streamRegistry.get()); // After the connection is initialized, we do not need to hold a reference to the database. 
database.reset(); } @@ -94,16 +83,9 @@ void NodeConnection::SetQueryTimeout(const Napi::CallbackInfo& info) { } } -void NodeConnection::Interrupt(const Napi::CallbackInfo& /* info */) { - if (this->connection) { - this->connection->interrupt(); - } -} - void NodeConnection::Close(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); Napi::HandleScope scope(env); - streamRegistry.reset(); this->connection.reset(); } @@ -175,139 +157,3 @@ Napi::Value NodeConnection::QueryAsync(const Napi::CallbackInfo& info) { asyncWorker->Queue(); return info.Env().Undefined(); } - -static lbug::common::LogicalType parseColumnType(const std::string& typeStr) { - std::string upper = typeStr; - std::transform(upper.begin(), upper.end(), upper.begin(), ::toupper); - if (upper == "INT64") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT64); - if (upper == "INT32") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT32); - if (upper == "INT16") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT16); - if (upper == "INT8") return lbug::common::LogicalType(lbug::common::LogicalTypeID::INT8); - if (upper == "UINT64") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT64); - if (upper == "UINT32") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT32); - if (upper == "UINT16") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT16); - if (upper == "UINT8") return lbug::common::LogicalType(lbug::common::LogicalTypeID::UINT8); - if (upper == "DOUBLE") return lbug::common::LogicalType(lbug::common::LogicalTypeID::DOUBLE); - if (upper == "FLOAT") return lbug::common::LogicalType(lbug::common::LogicalTypeID::FLOAT); - if (upper == "STRING") return lbug::common::LogicalType(lbug::common::LogicalTypeID::STRING); - if (upper == "BOOL" || upper == "BOOLEAN") - return lbug::common::LogicalType(lbug::common::LogicalTypeID::BOOL); - if (upper == "DATE") return lbug::common::LogicalType(lbug::common::LogicalTypeID::DATE); - if (upper == "TIMESTAMP") - return lbug::common::LogicalType(lbug::common::LogicalTypeID::TIMESTAMP); - throw std::runtime_error("Unsupported column type for registerStream: " + typeStr); -} - -Napi::Value NodeConnection::RegisterStream(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - Napi::HandleScope scope(env); - if (!connection || !streamRegistry) { - Napi::Error::New(env, "Connection not initialized.").ThrowAsJavaScriptException(); - return env.Undefined(); - } - if (info.Length() < 3 || !info[0].IsString() || !info[1].IsFunction() || !info[2].IsArray()) { - Napi::Error::New(env, - "registerStream(name, getChunkCallback, columns): name string, getChunkCallback " - "function(requestId), columns array of { name, type }.") - .ThrowAsJavaScriptException(); - return env.Undefined(); - } - std::string name = info[0].As().Utf8Value(); - Napi::Function getChunkCallback = info[1].As(); - Napi::Array columnsArr = info[2].As(); - std::vector columnNames; - std::vector columnTypes; - for (uint32_t i = 0; i < columnsArr.Length(); i++) { - Napi::Value col = columnsArr.Get(i); - if (!col.IsObject()) continue; - Napi::Object obj = col.As(); - if (!obj.Get("name").IsString() || !obj.Get("type").IsString()) continue; - columnNames.push_back(obj.Get("name").As().Utf8Value()); - columnTypes.push_back( - parseColumnType(obj.Get("type").As().Utf8Value())); - } - if (columnNames.empty()) { - Napi::Error::New(env, "registerStream: at least one column required.").ThrowAsJavaScriptException(); - return env.Undefined(); 
- } - try { - auto tsf = Napi::ThreadSafeFunction::New( - env, getChunkCallback, "NodeStreamGetChunk", 0, 1); - streamRegistry->registerSource(name, std::move(tsf), std::move(columnNames), - std::move(columnTypes)); - } catch (const std::exception& exc) { - Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException(); - } - return env.Undefined(); -} - -void NodeConnection::UnregisterStream(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - Napi::HandleScope scope(env); - if (!streamRegistry) return; - if (info.Length() < 1 || !info[0].IsString()) { - Napi::Error::New(env, "unregisterStream(name): name string.").ThrowAsJavaScriptException(); - return; - } - streamRegistry->unregisterSource(info[0].As().Utf8Value()); -} - -void NodeConnection::ReturnChunk(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - if (info.Length() < 3 || !info[0].IsNumber() || !info[1].IsArray() || !info[2].IsBoolean()) { - Napi::Error::New(env, - "returnChunk(requestId, rows, done): requestId number, rows array of rows, done boolean.") - .ThrowAsJavaScriptException(); - return; - } - uint64_t requestId = static_cast(info[0].ToNumber().Int64Value()); - Napi::Array rows = info[1].As(); - bool done = info[2].ToBoolean().Value(); - returnChunkFromJS(requestId, rows, done); -} - -Napi::Value NodeConnection::GetNumNodes(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - Napi::HandleScope scope(env); - if (!connection) { - Napi::Error::New(env, "Connection not initialized.").ThrowAsJavaScriptException(); - return env.Undefined(); - } - if (info.Length() < 1 || !info[0].IsString()) { - Napi::Error::New(env, "getNumNodes(nodeName): nodeName string required.").ThrowAsJavaScriptException(); - return env.Undefined(); - } - try { - Database* db = connection->getClientContext()->getDatabase(); - StorageDriver storageDriver(db); - std::string nodeName = info[0].As().Utf8Value(); - uint64_t count = storageDriver.getNumNodes(nodeName); - return Napi::Number::New(env, static_cast(count)); - } catch (const std::exception& exc) { - Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException(); - } - return env.Undefined(); -} - -Napi::Value NodeConnection::GetNumRels(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - Napi::HandleScope scope(env); - if (!connection) { - Napi::Error::New(env, "Connection not initialized.").ThrowAsJavaScriptException(); - return env.Undefined(); - } - if (info.Length() < 1 || !info[0].IsString()) { - Napi::Error::New(env, "getNumRels(relName): relName string required.").ThrowAsJavaScriptException(); - return env.Undefined(); - } - try { - Database* db = connection->getClientContext()->getDatabase(); - StorageDriver storageDriver(db); - std::string relName = info[0].As().Utf8Value(); - uint64_t count = storageDriver.getNumRels(relName); - return Napi::Number::New(env, static_cast(count)); - } catch (const std::exception& exc) { - Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException(); - } - return env.Undefined(); -} diff --git a/tools/nodejs_api/src_cpp/node_query_result.cpp b/tools/nodejs_api/src_cpp/node_query_result.cpp index d202b705d4..24c18222ac 100644 --- a/tools/nodejs_api/src_cpp/node_query_result.cpp +++ b/tools/nodejs_api/src_cpp/node_query_result.cpp @@ -25,7 +25,6 @@ Napi::Object NodeQueryResult::Init(Napi::Env env, Napi::Object exports) { InstanceMethod("getColumnNamesSync", &NodeQueryResult::GetColumnNamesSync), InstanceMethod("getQuerySummaryAsync", 
&NodeQueryResult::GetQuerySummaryAsync), InstanceMethod("getQuerySummarySync", &NodeQueryResult::GetQuerySummarySync), - InstanceMethod("toStringSync", &NodeQueryResult::GetToStringSync), InstanceMethod("close", &NodeQueryResult::Close)}); exports.Set("NodeQueryResult", t); @@ -229,17 +228,6 @@ Napi::Value NodeQueryResult::GetQuerySummarySync(const Napi::CallbackInfo& info) return env.Undefined(); } -Napi::Value NodeQueryResult::GetToStringSync(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - Napi::HandleScope scope(env); - try { - return Napi::String::New(env, this->queryResult->toString()); - } catch (const std::exception& exc) { - Napi::Error::New(env, std::string(exc.what())).ThrowAsJavaScriptException(); - } - return env.Undefined(); -} - void NodeQueryResult::Close(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); Napi::HandleScope scope(env); diff --git a/tools/nodejs_api/src_cpp/node_scan_replacement.cpp b/tools/nodejs_api/src_cpp/node_scan_replacement.cpp deleted file mode 100644 index 8fdb757f46..0000000000 --- a/tools/nodejs_api/src_cpp/node_scan_replacement.cpp +++ /dev/null @@ -1,134 +0,0 @@ -#include "include/node_scan_replacement.h" -#include "include/node_stream_scan.h" -#include "include/node_util.h" - -#include "function/table/bind_input.h" -#include "main/client_context.h" -#include "main/connection.h" - -#include - -using namespace lbug::common; -using namespace lbug::function; -using namespace lbug::main; - -namespace { - -std::mutex g_requestMutex; -std::atomic g_nextRequestId{1}; -std::unordered_map> g_chunkRequests; - -} // namespace - -void NodeStreamRegistry::registerSource(const std::string& name, Napi::ThreadSafeFunction tsf, - std::vector columnNames, std::vector columnTypes) { - std::lock_guard lock(mtx_); - auto state = std::make_shared(); - state->getChunkTsf = std::move(tsf); - state->columnNames = std::move(columnNames); - state->columnTypes = std::move(columnTypes); - sources_[name] = std::move(state); -} - -void NodeStreamRegistry::unregisterSource(const std::string& name) { - std::lock_guard lock(mtx_); - sources_.erase(name); -} - -std::vector NodeStreamRegistry::lookup(const std::string& name) const { - std::lock_guard lock(mtx_); - auto it = sources_.find(name); - if (it == sources_.end()) { - return {}; - } - return {reinterpret_cast(it->second.get())}; -} - -std::unique_ptr NodeStreamRegistry::replace( - std::span handles) const { - if (handles.empty()) { - return nullptr; - } - auto* statePtr = reinterpret_cast(handles[0]); - auto state = std::shared_ptr(statePtr, [](NodeStreamSourceState*) {}); - auto data = std::make_unique(); - data->func = NodeStreamScanFunction::getFunction(); - data->bindInput.addLiteralParam(Value::createValue(reinterpret_cast(statePtr))); - return data; -} - -NodeStreamChunkRequest* NodeStreamRegistry::getChunkRequest(uint64_t requestId) { - std::lock_guard lock(g_requestMutex); - auto it = g_chunkRequests.find(requestId); - return it != g_chunkRequests.end() ? 
it->second.get() : nullptr; -} - -void NodeStreamRegistry::setChunkRequest(uint64_t requestId, - std::unique_ptr req) { - std::lock_guard lock(g_requestMutex); - if (req) { - g_chunkRequests[requestId] = std::move(req); - } else { - g_chunkRequests.erase(requestId); - } -} - -uint64_t NodeStreamRegistry::nextRequestId() { - return g_nextRequestId++; -} - -static std::vector lookupNodeStream(const std::string& objectName, - void* registryVoid) { - auto* registry = static_cast(registryVoid); - return registry->lookup(objectName); -} - -static std::unique_ptr replaceNodeStream( - std::span handles, void* registryVoid) { - auto* registry = static_cast(registryVoid); - return registry->replace(handles); -} - -void addNodeScanReplacement(Connection* connection, NodeStreamRegistry* registry) { - auto lookup = [registry](const std::string& name) { - return lookupNodeStream(name, registry); - }; - auto replace = [registry](std::span handles) { - return replaceNodeStream(handles, registry); - }; - connection->getClientContext()->addScanReplace(ScanReplacement(std::move(lookup), replace)); -} - -void returnChunkFromJS(uint64_t requestId, Napi::Array rowsNapi, bool done) { - auto* req = NodeStreamRegistry::getChunkRequest(requestId); - if (!req) { - return; - } - std::vector> rows; - const size_t numRows = rowsNapi.Length(); - rows.reserve(numRows); - for (size_t r = 0; r < numRows; r++) { - Napi::Value rowVal = rowsNapi.Get(r); - std::vector row; - if (rowVal.IsArray()) { - auto arr = rowVal.As(); - for (size_t c = 0; c < arr.Length(); c++) { - row.push_back(Util::TransformNapiValue(arr.Get(c))); - } - } else if (rowVal.IsObject() && !rowVal.IsNull() && !rowVal.IsUndefined()) { - auto obj = rowVal.As(); - const auto& colNames = req->columnNames; - for (const auto& colName : colNames) { - row.push_back(Util::TransformNapiValue(obj.Get(colName))); - } - } - rows.push_back(std::move(row)); - } - { - std::lock_guard lock(req->mtx); - req->rows = std::move(rows); - req->done = done; - req->filled = true; - } - req->cv.notify_one(); -} diff --git a/tools/nodejs_api/src_cpp/node_stream_scan.cpp b/tools/nodejs_api/src_cpp/node_stream_scan.cpp deleted file mode 100644 index 529c10b91d..0000000000 --- a/tools/nodejs_api/src_cpp/node_stream_scan.cpp +++ /dev/null @@ -1,103 +0,0 @@ -#include "include/node_stream_scan.h" -#include "include/node_scan_replacement.h" - -#include "binder/binder.h" -#include "common/constants.h" -#include "common/system_config.h" -#include "function/table/bind_input.h" -#include "processor/execution_context.h" -#include "processor/result/factorized_table.h" - -#include - -using namespace lbug::common; -using namespace lbug::function; - -namespace lbug { - -namespace { -std::mutex g_nodeStreamTableFuncMutex; -} - -static std::unique_ptr bindFunc(lbug::main::ClientContext*, - const TableFuncBindInput* input) { - auto* statePtr = reinterpret_cast(input->getLiteralVal(0)); - KU_ASSERT(statePtr != nullptr); - std::shared_ptr state(statePtr, [](NodeStreamSourceState*) {}); - auto columns = input->binder->createVariables(state->columnNames, state->columnTypes); - return std::make_unique(std::move(columns), state); -} - -static std::unique_ptr initSharedState( - const TableFuncInitSharedStateInput&) { - return std::make_unique(0); -} - -static std::unique_ptr initLocalState( - const TableFuncInitLocalStateInput&) { - return std::make_unique(); -} - -static offset_t tableFunc(const TableFuncInput& input, TableFuncOutput& output) { - auto* bindData = input.bindData->constPtrCast(); - auto& 
state = *bindData->state; - std::unique_lock streamLock(g_nodeStreamTableFuncMutex); - const uint64_t requestId = NodeStreamRegistry::nextRequestId(); - auto req = std::make_unique(); - req->columnNames = state.columnNames; - NodeStreamRegistry::setChunkRequest(requestId, std::move(req)); - NodeStreamChunkRequest* reqPtr = NodeStreamRegistry::getChunkRequest(requestId); - KU_ASSERT(reqPtr != nullptr); - - state.getChunkTsf.BlockingCall(&requestId, - [](Napi::Env env, Napi::Function jsCallback, const uint64_t* idPtr) { - jsCallback.Call({Napi::Number::New(env, static_cast(*idPtr))}); - }); - - std::vector> rowsCopy; - { - std::unique_lock lock(reqPtr->mtx); - reqPtr->cv.wait(lock, [reqPtr] { return reqPtr->filled; }); - rowsCopy = std::move(reqPtr->rows); - } - NodeStreamRegistry::setChunkRequest(requestId, nullptr); - streamLock.unlock(); - - const offset_t numRows = static_cast(rowsCopy.size()); - if (numRows == 0) { - return 0; - } - - const auto numCols = bindData->getNumColumns(); - const auto cap = std::min(numRows, static_cast(DEFAULT_VECTOR_CAPACITY)); - for (offset_t r = 0; r < cap; r++) { - for (auto c = 0u; c < numCols; c++) { - auto& vec = output.dataChunk.getValueVectorMutable(c); - if (c < rowsCopy[r].size() && !rowsCopy[r][c].isNull()) { - vec.setNull(r, false); - vec.copyFromValue(r, rowsCopy[r][c]); - } else { - vec.setNull(r, true); - } - } - } - output.dataChunk.state->getSelVectorUnsafe().setSelSize(cap); - return cap; -} - -static double progressFunc(TableFuncSharedState*) { - return 0.0; -} - -TableFunction NodeStreamScanFunction::getFunction() { - TableFunction func(name, std::vector{LogicalTypeID::POINTER}); - func.tableFunc = tableFunc; - func.bindFunc = bindFunc; - func.initSharedStateFunc = initSharedState; - func.initLocalStateFunc = initLocalState; - func.progressFunc = progressFunc; - func.canParallelFunc = [] { return false; }; - return func; -} - -} // namespace lbug diff --git a/tools/nodejs_api/src_js/connection.js b/tools/nodejs_api/src_js/connection.js index d89e8f58de..e8583d7cb4 100644 --- a/tools/nodejs_api/src_js/connection.js +++ b/tools/nodejs_api/src_js/connection.js @@ -121,20 +121,13 @@ class Connection { * Execute a prepared statement with the given parameters. * @param {lbug.PreparedStatement} preparedStatement the prepared statement to execute. * @param {Object} params a plain object mapping parameter names to values. - * @param {Object|Function} [optionsOrProgressCallback] - Options { signal?: AbortSignal, progressCallback?: Function } or legacy progress callback. - * @returns {Promise} a promise that resolves to the query result. Rejects if error or options.signal is aborted. + * @param {Function} [progressCallback] - Optional callback function that is invoked with the progress of the query execution. The callback receives three arguments: pipelineProgress, numPipelinesFinished, and numPipelines. + * @returns {Promise} a promise that resolves to the query result. The promise is rejected if there is an error. 
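+   * @example
+   * // A usage sketch (not part of the diff); the "person" table and the
+   * // "id" parameter are illustrative names.
+   * const stmt = await conn.prepare(
+   *   "MATCH (a:person) WHERE a.ID = $id RETURN a.isStudent;"
+   * );
+   * const result = await conn.execute(stmt, { id: 0 });
+   * const rows = await result.getAll();
+   * result.close();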
*/ - execute(preparedStatement, params = {}, optionsOrProgressCallback) { - const { signal, progressCallback } = this._normalizeQueryOptions(optionsOrProgressCallback); + execute(preparedStatement, params = {}, progressCallback) { return new Promise((resolve, reject) => { - if (progressCallback !== undefined && typeof progressCallback !== "function") { - return reject(new Error("progressCallback must be a function.")); - } - if (optionsOrProgressCallback != null && typeof optionsOrProgressCallback !== "function" && typeof optionsOrProgressCallback !== "object") { - return reject(new Error("progressCallback must be a function.")); - } if ( typeof preparedStatement !== "object" || preparedStatement.constructor.name !== "PreparedStatement" ) { return reject( @@ -154,29 +147,11 @@ const value = params[key]; paramArray.push([key, value]); } - if (signal?.aborted) { - return reject(this._createAbortError()); - } - let abortListener; - const cleanup = () => { - if (signal && abortListener) { - signal.removeEventListener("abort", abortListener); - } - }; - if (signal) { - abortListener = () => { - this.interrupt(); - cleanup(); - reject(this._createAbortError()); - }; - signal.addEventListener("abort", abortListener); + if (progressCallback && typeof progressCallback !== "function") { + return reject(new Error("progressCallback must be a function.")); } this._getConnection() .then((connection) => { - if (signal?.aborted) { - cleanup(); - return reject(this._createAbortError()); - } const nodeQueryResult = new LbugNative.NodeQueryResult(); try { connection.executeAsync( @@ -184,11 +159,7 @@ nodeQueryResult, paramArray, (err) => { - cleanup(); if (err) { - if (signal?.aborted && err.message === "Interrupted.") { - return reject(this._createAbortError()); - } return reject(err); } this._unwrapMultipleQueryResults(nodeQueryResult) @@ -202,12 +173,10 @@ progressCallback ); } catch (e) { - cleanup(); return reject(e); } }) .catch((err) => { - cleanup(); return reject(err); }); }); @@ -292,65 +261,26 @@ return new PreparedStatement(this, preparedStatement); } - /** - * Interrupt the currently executing query on this connection. - * No-op if the connection is not initialized or no query is running. - */ - interrupt() { - if (this._connection) { - this._connection.interrupt(); - } - } - /** * Execute a query. * @param {String} statement the statement to execute. - * @param {Object|Function} [optionsOrProgressCallback] - Options object { signal?: AbortSignal, progressCallback?: Function } or legacy progress callback. - * @returns {Promise} a promise that resolves to the query result. The promise is rejected if there is an error or if options.signal is aborted. + * @param {Function} [progressCallback] - Optional callback function that is invoked with the progress of the query execution. The callback receives three arguments: pipelineProgress, numPipelinesFinished, and numPipelines. + * @returns {Promise} a promise that resolves to the query result. The promise is rejected if there is an error.
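+   * @example
+   * // Sketch of the progress callback contract documented above; the query
+   * // string is illustrative.
+   * const result = await conn.query(
+   *   "MATCH (a:person) RETURN a.isStudent;",
+   *   (pipelineProgress, numPipelinesFinished, numPipelines) => {
+   *     console.log(numPipelinesFinished + "/" + numPipelines + " pipelines done");
+   *   }
+   * );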
*/ - query(statement, optionsOrProgressCallback) { - const { signal, progressCallback } = this._normalizeQueryOptions(optionsOrProgressCallback); + query(statement, progressCallback) { return new Promise((resolve, reject) => { - if (progressCallback !== undefined && typeof progressCallback !== "function") { - return reject(new Error("progressCallback must be a function.")); - } - if (optionsOrProgressCallback != null && typeof optionsOrProgressCallback !== "function" && typeof optionsOrProgressCallback !== "object") { - return reject(new Error("progressCallback must be a function.")); - } if (typeof statement !== "string") { return reject(new Error("statement must be a string.")); } - if (signal?.aborted) { - return reject(this._createAbortError()); - } - let abortListener; - const cleanup = () => { - if (signal && abortListener) { - signal.removeEventListener("abort", abortListener); - } - }; - if (signal) { - abortListener = () => { - this.interrupt(); - cleanup(); - reject(this._createAbortError()); - }; - signal.addEventListener("abort", abortListener); + if (progressCallback && typeof progressCallback !== "function") { + return reject(new Error("progressCallback must be a function.")); } this._getConnection() .then((connection) => { - if (signal?.aborted) { - cleanup(); - return reject(this._createAbortError()); - } const nodeQueryResult = new LbugNative.NodeQueryResult(); try { connection.queryAsync(statement, nodeQueryResult, (err) => { - cleanup(); if (err) { - if (signal?.aborted && err.message === "Interrupted.") { - return reject(this._createAbortError()); - } return reject(err); } this._unwrapMultipleQueryResults(nodeQueryResult) @@ -363,37 +293,15 @@ class Connection { }, progressCallback); } catch (e) { - cleanup(); return reject(e); } }) .catch((err) => { - cleanup(); return reject(err); }); }); } - _normalizeQueryOptions(optionsOrProgressCallback) { - if (optionsOrProgressCallback == null) { - return { signal: undefined, progressCallback: undefined }; - } - if (typeof optionsOrProgressCallback === "function") { - return { signal: undefined, progressCallback: optionsOrProgressCallback }; - } - if (typeof optionsOrProgressCallback === "object" && optionsOrProgressCallback !== null) { - return { - signal: optionsOrProgressCallback.signal, - progressCallback: optionsOrProgressCallback.progressCallback, - }; - } - return { signal: undefined, progressCallback: undefined }; - } - - _createAbortError() { - return new DOMException("The operation was aborted.", "AbortError"); - } - /** * Check that the connection is alive (e.g. for connection pools or health checks). * Runs a trivial query; rejects if the connection is broken. @@ -412,127 +320,6 @@ class Connection { return true; } - /** - * Run EXPLAIN on a Cypher statement and return the plan as a string. - * @param {string} statement – Cypher statement (e.g. "MATCH (a:person) RETURN a") - * @returns {Promise} the plan string (one row per line) - */ - async explain(statement) { - if (typeof statement !== "string") { - throw new Error("explain: statement must be a string."); - } - const trimmed = statement.trim(); - const explainStatement = trimmed.toUpperCase().startsWith("EXPLAIN") ? trimmed : "EXPLAIN " + trimmed; - const result = await this.query(explainStatement); - const single = Array.isArray(result) ? 
result[0] : result; - const rows = await single.getAll(); - single.close(); - if (rows.length === 0) { - return ""; - } - return rows - .map((row) => Object.values(row).join(" | ")) - .join("\n"); - } - - /** - * Get the number of nodes in a node table. Connection must be initialized. - * @param {string} nodeName – name of the node table (e.g. "User") - * @returns {number} count of nodes - */ - getNumNodes(nodeName) { - if (typeof nodeName !== "string") { - throw new Error("getNumNodes(nodeName): nodeName must be a string."); - } - const connection = this._getConnectionSync(); - return connection.getNumNodes(nodeName); - } - - /** - * Get the number of relationships in a rel table. Connection must be initialized. - * @param {string} relName – name of the rel table (e.g. "Follows") - * @returns {number} count of relationships - */ - getNumRels(relName) { - if (typeof relName !== "string") { - throw new Error("getNumRels(relName): relName must be a string."); - } - const connection = this._getConnectionSync(); - return connection.getNumRels(relName); - } - - /** - * Register a stream source for LOAD FROM name. The source must be AsyncIterable; each yielded - * value is a row (array of column values in schema order, or object keyed by column name). - * Call unregisterStream(name) when done or before reusing the name. - * @param {string} name – name used in Cypher: LOAD FROM name RETURN ... - * @param {AsyncIterable|Object>} source – async iterable of rows - * @param {{ columns: Array<{ name: string, type: string }> }} options – schema (required). type: INT64, INT32, DOUBLE, STRING, BOOL, DATE, etc. - */ - async registerStream(name, source, options = {}) { - if (typeof name !== "string") { - throw new Error("registerStream: name must be a string."); - } - const columns = options.columns; - if (!Array.isArray(columns) || columns.length === 0) { - throw new Error("registerStream: options.columns (array of { name, type }) is required."); - } - const conn = await this._getConnection(); - const it = source[Symbol.asyncIterator] ? source[Symbol.asyncIterator].call(source) : source; - const pending = []; - let consumerRunning = false; - - const toRows = (raw) => { - if (raw == null) return []; - if (Array.isArray(raw)) { - const first = raw[0]; - const isArrayOfRows = - raw.length > 0 && - (Array.isArray(first) || (typeof first === "object" && first !== null && !Array.isArray(first))); - return isArrayOfRows ? raw : [raw]; - } - return [raw]; - }; - - const runConsumer = async () => { - pending.sort((a, b) => a - b); - while (pending.length > 0) { - const requestId = pending.shift(); - try { - const n = await it.next(); - const { rows, done } = { rows: toRows(n.value), done: n.done }; - conn.returnChunk(requestId, rows, done); - } catch (e) { - conn.returnChunk(requestId, [], true); - } - } - consumerRunning = false; - }; - - const getChunk = (requestId) => { - pending.push(requestId); - if (!consumerRunning) { - consumerRunning = true; - setImmediate(() => runConsumer()); - } - }; - conn.registerStream(name, getChunk, columns); - } - - /** - * Unregister a stream source by name. - * @param {string} name – name passed to registerStream - */ - unregisterStream(name) { - if (typeof name !== "string") { - throw new Error("unregisterStream: name must be a string."); - } - if (!this._connection) { - return; - } - this._connection.unregisterStream(name); - } - /** * Execute a query synchronously. * @param {String} statement the statement to execute. 
This function blocks the main thread for the duration of the query, so use it with caution. diff --git a/tools/nodejs_api/src_js/database.js b/tools/nodejs_api/src_js/database.js index b7d7a79d6f..dc70582494 100644 --- a/tools/nodejs_api/src_js/database.js +++ b/tools/nodejs_api/src_js/database.js @@ -2,29 +2,6 @@ const LbugNative = require("./lbug_native.js"); -/** Error code when the database file is locked by another process. */ -const LBUG_DATABASE_LOCKED = "LBUG_DATABASE_LOCKED"; - -const LOCK_ERROR_MESSAGE = "Could not set lock on file"; - -function isLockError(err) { - return err && typeof err.message === "string" && err.message.includes(LOCK_ERROR_MESSAGE); -} - -function normalizeInitError(err) { - if (isLockError(err)) { - const e = new Error(err.message); - e.code = LBUG_DATABASE_LOCKED; - e.cause = err; - return e; - } - return err; -} - -function sleep(ms) { - return new Promise((r) => setTimeout(r, ms)); -} - class Database { /** * Initialize a new Database object. Note that the initialization is done @@ -49,8 +26,6 @@ class Database { * the error occurred. * @param {Boolean} enableChecksums If true, the database will use checksums to detect corruption in the * WAL file. - * @param {Number} [openLockRetryMs=5000] When the database file is locked, retry opening for up to this many ms - * (grace period). Only applies to async init(); set to 0 to fail immediately. Ignored for in-memory databases. */ constructor( databasePath, @@ -62,7 +37,6 @@ class Database { checkpointThreshold = -1, throwOnWalReplayFailure = true, enableChecksums = true, - openLockRetryMs = 5000, ) { if (!databasePath) { databasePath = ":memory:"; @@ -79,9 +53,6 @@ class Database { if (typeof checkpointThreshold !== "number" || maxDBSize < -1) { throw new Error("Checkpoint threshold must be a positive integer."); } - if (typeof openLockRetryMs !== "number" || openLockRetryMs < 0) { - throw new Error("openLockRetryMs must be a non-negative number."); - } bufferManagerSize = Math.floor(bufferManagerSize); maxDBSize = Math.floor(maxDBSize); checkpointThreshold = Math.floor(checkpointThreshold); @@ -99,8 +70,6 @@ class Database { this._isInitialized = false; this._initPromise = null; this._isClosed = false; - // Grace period for lock: retry for up to openLockRetryMs (0 = no retry). In-memory has no file lock. - this._openLockRetryMs = databasePath === ":memory:" ? 0 : Math.floor(openLockRetryMs); } /** @@ -122,49 +91,27 @@ * Initialize the database. Calling this function is optional, as the * database is initialized automatically when the first query is executed. - * When the file is locked, init() retries for up to openLockRetryMs (default 5s) before throwing.
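+   * @example
+   * // A sketch; the database path is illustrative. Explicit init() is
+   * // optional and runs the same initialization as the first query would.
+   * const db = new lbug.Database("./demo.kz");
+   * await db.init();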
*/ async init() { if (!this._isInitialized) { if (!this._initPromise) { - const self = this; - const tryOnce = () => - new Promise((resolve, reject) => { - self._database.initAsync((err) => { - if (err) reject(err); - else { - self._isInitialized = true; - resolve(); + this._initPromise = new Promise((resolve, reject) => { + this._database.initAsync((err) => { + if (err) { + reject(err); + } else { + try { + this._isInitialized = true; + } catch (e) { + return reject(e); } - }); - }); - const OPEN_LOCK_DELAY_MS = 200; - - this._initPromise = (async () => { - const start = Date.now(); - for (;;) { - if (self._isClosed) throw new Error("Database is closed."); - try { - await tryOnce(); - return; - } catch (err) { - if (!isLockError(err)) throw normalizeInitError(err); - if ( - self._openLockRetryMs <= 0 || - Date.now() - start >= self._openLockRetryMs - ) { - throw normalizeInitError(err); - } - await sleep(OPEN_LOCK_DELAY_MS); + resolve(); } - } - })(); - } - try { - await this._initPromise; - } finally { - this._initPromise = null; + }); + }); } + await this._initPromise; + this._initPromise = null; } } @@ -180,11 +127,7 @@ class Database { if (this._isInitialized) { return; } - try { - this._database.initSync(); - } catch (err) { - throw normalizeInitError(err); - } + this._database.initSync(); this._isInitialized = true; } @@ -265,6 +208,4 @@ class Database { } } -Database.LBUG_DATABASE_LOCKED = LBUG_DATABASE_LOCKED; - module.exports = Database; diff --git a/tools/nodejs_api/src_js/index.js b/tools/nodejs_api/src_js/index.js index 3a0b35fd1c..d7da3f72b5 100644 --- a/tools/nodejs_api/src_js/index.js +++ b/tools/nodejs_api/src_js/index.js @@ -4,16 +4,12 @@ const Connection = require("./connection.js"); const Database = require("./database.js"); const PreparedStatement = require("./prepared_statement.js"); const QueryResult = require("./query_result.js"); -const { createPool, Pool } = require("./pool.js"); module.exports = { Connection, Database, PreparedStatement, QueryResult, - createPool, - Pool, - LBUG_DATABASE_LOCKED: Database.LBUG_DATABASE_LOCKED, get VERSION() { return Database.getVersion(); }, diff --git a/tools/nodejs_api/src_js/index.mjs b/tools/nodejs_api/src_js/index.mjs index bd921d7046..9293e40683 100644 --- a/tools/nodejs_api/src_js/index.mjs +++ b/tools/nodejs_api/src_js/index.mjs @@ -5,9 +5,6 @@ export const Database = lbug.Database; export const Connection = lbug.Connection; export const PreparedStatement = lbug.PreparedStatement; export const QueryResult = lbug.QueryResult; -export const createPool = lbug.createPool; -export const Pool = lbug.Pool; -export const LBUG_DATABASE_LOCKED = lbug.LBUG_DATABASE_LOCKED; export const VERSION = lbug.VERSION; export const STORAGE_VERSION = lbug.STORAGE_VERSION; export default lbug; diff --git a/tools/nodejs_api/src_js/lbug.d.ts b/tools/nodejs_api/src_js/lbug.d.ts index b81fea3b06..0f9a921f3a 100644 --- a/tools/nodejs_api/src_js/lbug.d.ts +++ b/tools/nodejs_api/src_js/lbug.d.ts @@ -22,15 +22,6 @@ export type ProgressCallback = ( numPipelines: number ) => void; -/** - * Options for query() and execute(). - * Use signal to cancel the operation via AbortController. - */ -export interface QueryOptions { - signal?: AbortSignal; - progressCallback?: ProgressCallback; -} - /** * Represents a node ID in the graph database. */ @@ -113,63 +104,6 @@ export interface SystemConfig { checkpointThreshold?: number; } -/** - * Options for createPool(). Same shape as Database constructor args (except path). 
- */ -export interface PoolDatabaseOptions { - bufferManagerSize?: number; - enableCompression?: boolean; - readOnly?: boolean; - maxDBSize?: number; - autoCheckpoint?: boolean; - checkpointThreshold?: number; - throwOnWalReplayFailure?: boolean; - enableChecksums?: boolean; - openLockRetryMs?: number; -} - -/** - * Options for createPool(). - */ -export interface PoolOptions { - /** Database file path (default ":memory:") */ - databasePath?: string; - /** Same shape as Database constructor options (bufferManagerSize, readOnly, etc.) */ - databaseOptions?: PoolDatabaseOptions; - /** Minimum connections to keep (default 0) */ - minSize?: number; - /** Maximum connections in the pool (required) */ - maxSize: number; - /** Max time to wait for acquire in ms (0 = wait forever, default 0) */ - acquireTimeoutMillis?: number; - /** If true, call conn.ping() before handing out (default false) */ - validateOnAcquire?: boolean; -} - -/** - * Connection pool: acquire/release or run(fn). One shared Database, up to maxSize Connection instances. - */ -export interface Pool { - /** Acquire a connection; must call release(conn) when done. Prefer run(fn) to avoid leaks. */ - acquire(): Promise; - /** Return a connection to the pool. */ - release(conn: Connection): void; - /** Run fn(conn); connection is released in finally (on success or throw). */ - run(fn: (conn: Connection) => Promise): Promise; - /** Close pool: reject new/pending acquire, then close all connections and database. */ - close(): Promise; -} - -/** Pool constructor (use createPool() instead of new Pool()). */ -export type PoolConstructor = new (options: PoolOptions) => Pool; - -/** - * Create a connection pool. - * @param options Pool options (maxSize required; databasePath, databaseOptions, minSize, acquireTimeoutMillis, validateOnAcquire optional) - * @returns Pool instance - */ -export function createPool(options: PoolOptions): Pool; - /** * Represents a Lbug database instance. */ @@ -185,7 +119,6 @@ export class Database { * @param checkpointThreshold Threshold for automatic checkpoints * @param throwOnWalReplayFailure If true, WAL replay failures throw; otherwise replay stops at error * @param enableChecksums If true, use checksums to detect WAL corruption - * @param openLockRetryMs When the file is locked, retry opening for up to this many ms (default 5000). Set 0 to fail immediately. Only for async init(); ignored for :memory: */ constructor( databasePath?: string, @@ -196,14 +129,12 @@ export class Database { autoCheckpoint?: boolean, checkpointThreshold?: number, throwOnWalReplayFailure?: boolean, - enableChecksums?: boolean, - openLockRetryMs?: number + enableChecksums?: boolean ); /** * Initialize the database. Calling this function is optional, as the * database is initialized automatically when the first query is executed. - * When the file is locked, retries for up to openLockRetryMs (default 5s) before throwing. * @returns Promise that resolves when initialization completes */ init(): Promise; @@ -273,12 +204,6 @@ export class Connection { */ setQueryTimeout(timeoutInMs: number): void; - /** - * Interrupt the currently executing query on this connection. - * No-op if the connection is not initialized or no query is running. - */ - interrupt(): void; - /** * Close the connection. * @returns Promise that resolves when connection is closed @@ -294,13 +219,13 @@ export class Connection { * Execute a prepared statement. 
* @param preparedStatement The prepared statement to execute * @param params Parameters for the query as a plain object - * @param optionsOrProgressCallback Options (e.g. signal for abort) or legacy progress callback - * @returns Promise that resolves to the query result(s). Rejects with DOMException AbortError if signal is aborted. + * @param progressCallback Optional progress callback + * @returns Promise that resolves to the query result(s) */ execute( preparedStatement: PreparedStatement, params?: Record, - optionsOrProgressCallback?: QueryOptions | ProgressCallback + progressCallback?: ProgressCallback ): Promise; /** @@ -331,12 +256,12 @@ export class Connection { /** * Execute a query. * @param statement The statement to execute - * @param optionsOrProgressCallback Options (e.g. signal for abort) or legacy progress callback - * @returns Promise that resolves to the query result(s). Rejects with DOMException AbortError if signal is aborted. + * @param progressCallback Optional progress callback + * @returns Promise that resolves to the query result(s) */ query( statement: string, - optionsOrProgressCallback?: QueryOptions | ProgressCallback + progressCallback?: ProgressCallback ): Promise; /** @@ -358,46 +283,6 @@ export class Connection { * @returns Promise that resolves to true if OK, rejects if connection is broken */ ping(): Promise; - - /** - * Run EXPLAIN on a Cypher statement and return the plan as a string. - * @param statement Cypher statement (e.g. "MATCH (a:person) RETURN a") - * @returns Promise that resolves to the plan string (one row per line) - */ - explain(statement: string): Promise; - - /** - * Get the number of nodes in a node table. Connection must be initialized. - * @param nodeName Name of the node table (e.g. "User") - * @returns Count of nodes - */ - getNumNodes(nodeName: string): number; - - /** - * Get the number of relationships in a rel table. Connection must be initialized. - * @param relName Name of the rel table (e.g. "Follows") - * @returns Count of relationships - */ - getNumRels(relName: string): number; - - /** - * Register a stream source for LOAD FROM name. Source must be AsyncIterable of rows (array or object). - * Unregister with unregisterStream(name) when done. - * @param name Name used in Cypher: LOAD FROM name RETURN ... - * @param source AsyncIterable of rows (array of column values or object keyed by column name) - * @param options.columns Schema: array of { name: string, type: string } (type: INT64, INT32, DOUBLE, STRING, BOOL, DATE, etc.) - */ - registerStream( - name: string, - source: AsyncIterable>, - options: { columns: Array<{ name: string; type: string }> } - ): Promise; - - /** - * Unregister a stream source by name. - * @param name Name passed to registerStream - */ - unregisterStream(name: string): void; } /** @@ -418,14 +303,6 @@ export class PreparedStatement { getErrorMessage(): string; } -/** - * Query summary with compiling and execution times (milliseconds). - */ -export interface QuerySummary { - compilingTime: number; - executionTime: number; -} - /** * Represents the results of a query execution. * Note: This class is created internally by Connection query methods. @@ -466,12 +343,6 @@ export class QueryResult implements AsyncIterable | nu */ getNextSync(): Record | null; - /** - * Return the query result as a string (header + rows). For failed queries returns the error message. 
- * @returns String representation of the result - */ - toString(): string; - /** * Return a Node.js Readable stream (object mode) that yields one row per chunk. * @returns Readable stream of row objects @@ -536,30 +407,12 @@ export class QueryResult implements AsyncIterable | nu */ getColumnNamesSync(): string[]; - /** - * Get the query summary (compiling and execution time in milliseconds). - * @returns Promise that resolves to the query summary - */ - getQuerySummary(): Promise; - - /** - * Get the query summary synchronously. - * @returns The query summary - */ - getQuerySummarySync(): QuerySummary; - /** * Close the result set and release resources. */ close(): void; } -/** - * Error code when the database file is locked by another process. - * Use with init() / initSync() or first query: catch and check err.code === LBUG_DATABASE_LOCKED. - */ -export const LBUG_DATABASE_LOCKED: "LBUG_DATABASE_LOCKED"; - /** * Default export for the Lbug module. */ @@ -568,9 +421,6 @@ declare const lbug: { Connection: typeof Connection; PreparedStatement: typeof PreparedStatement; QueryResult: typeof QueryResult; - createPool: typeof createPool; - Pool: PoolConstructor; - LBUG_DATABASE_LOCKED: typeof LBUG_DATABASE_LOCKED; VERSION: string; STORAGE_VERSION: bigint; }; diff --git a/tools/nodejs_api/src_js/pool.js b/tools/nodejs_api/src_js/pool.js deleted file mode 100644 index cc0d585113..0000000000 --- a/tools/nodejs_api/src_js/pool.js +++ /dev/null @@ -1,222 +0,0 @@ -"use strict"; - -const Database = require("./database.js"); -const Connection = require("./connection.js"); - -const DEFAULT_MIN_SIZE = 0; -const DEFAULT_ACQUIRE_TIMEOUT_MILLIS = 0; -const DEFAULT_VALIDATE_ON_ACQUIRE = false; - -function createDatabase(path, databaseOptions) { - const o = databaseOptions || {}; - return new Database( - path, - o.bufferManagerSize ?? 0, - o.enableCompression ?? true, - o.readOnly ?? false, - o.maxDBSize ?? 0, - o.autoCheckpoint ?? true, - o.checkpointThreshold ?? -1, - o.throwOnWalReplayFailure ?? true, - o.enableChecksums ?? true, - o.openLockRetryMs ?? 5000 - ); -} - -class Pool { - constructor(options) { - if (options == null || typeof options !== "object") { - throw new Error("createPool(options): options must be an object."); - } - const path = options.databasePath; - if (path !== undefined && path !== null && path !== "" && typeof path !== "string") { - throw new Error("createPool: databasePath must be a string or empty."); - } - const maxSize = options.maxSize; - if (typeof maxSize !== "number" || maxSize < 1 || !Number.isInteger(maxSize)) { - throw new Error("createPool: maxSize must be a positive integer."); - } - const minSize = options.minSize ?? DEFAULT_MIN_SIZE; - if (typeof minSize !== "number" || minSize < 0 || !Number.isInteger(minSize) || minSize > maxSize) { - throw new Error("createPool: minSize must be a non-negative integer not greater than maxSize."); - } - const acquireTimeoutMillis = options.acquireTimeoutMillis ?? DEFAULT_ACQUIRE_TIMEOUT_MILLIS; - if (typeof acquireTimeoutMillis !== "number" || acquireTimeoutMillis < 0) { - throw new Error("createPool: acquireTimeoutMillis must be a non-negative number."); - } - const validateOnAcquire = options.validateOnAcquire ?? DEFAULT_VALIDATE_ON_ACQUIRE; - - this._databasePath = path == null || path === "" ? 
":memory:" : path; - this._databaseOptions = options.databaseOptions || null; - this._maxSize = maxSize; - this._minSize = minSize; - this._acquireTimeoutMillis = acquireTimeoutMillis; - this._validateOnAcquire = Boolean(validateOnAcquire); - - this._database = null; - this._idle = []; - this._allConnections = []; - this._checkedOut = new Set(); - this._waiters = []; - this._closed = false; - } - - _ensureDatabase() { - if (this._database === null) { - this._database = createDatabase(this._databasePath, this._databaseOptions); - } - return this._database; - } - - _createConnection() { - const db = this._ensureDatabase(); - const conn = new Connection(db); - this._allConnections.push(conn); - return conn; - } - - _wakeNextWaiter(conn) { - while (this._waiters.length > 0) { - const w = this._waiters.shift(); - if (w.timer) clearTimeout(w.timer); - this._checkedOut.add(conn); - w.resolve(conn); - return; - } - this._idle.push(conn); - } - - /** - * Acquire a connection from the pool. Must call release(conn) when done (e.g. in finally). - * Prefer pool.run(fn) to avoid forgetting release. - * @returns {Promise} - */ - acquire() { - if (this._closed) { - return Promise.reject(new Error("Pool is closed.")); - } - - while (this._allConnections.length < this._minSize) { - this._idle.push(this._createConnection()); - } - if (this._idle.length > 0) { - const conn = this._idle.shift(); - this._checkedOut.add(conn); - if (this._validateOnAcquire) { - return conn.ping().then(() => conn); - } - return Promise.resolve(conn); - } - if (this._allConnections.length < this._maxSize) { - const conn = this._createConnection(); - this._checkedOut.add(conn); - if (this._validateOnAcquire) { - return conn.ping().then(() => conn); - } - return Promise.resolve(conn); - } - - return new Promise((resolve, reject) => { - const entry = { - resolve, - reject, - timer: null, - }; - if (this._acquireTimeoutMillis > 0) { - entry.timer = setTimeout(() => { - const i = this._waiters.indexOf(entry); - if (i !== -1) { - this._waiters.splice(i, 1); - reject(new Error("Pool acquire timed out.")); - } - }, this._acquireTimeoutMillis); - } - this._waiters.push(entry); - }); - } - - /** - * Return a connection to the pool. No-op if pool is closed. - * @param {lbug.Connection} conn - */ - release(conn) { - if (this._closed) { - return; - } - if ( - conn == null || - typeof conn !== "object" || - conn.constructor.name !== "Connection" - ) { - throw new Error("release(conn): conn must be a Connection from this pool."); - } - if (!this._checkedOut.has(conn)) { - throw new Error("release(conn): connection not from this pool or already released."); - } - this._checkedOut.delete(conn); - this._wakeNextWaiter(conn); - } - - /** - * Run a function with a connection; connection is released in finally (on success or throw). - * @template T - * @param {(conn: lbug.Connection) => Promise} fn - * @returns {Promise} - */ - async run(fn) { - if (typeof fn !== "function") { - throw new Error("pool.run(fn): fn must be a function."); - } - const conn = await this.acquire(); - try { - return await fn(conn); - } finally { - this.release(conn); - } - } - - /** - * Close the pool: reject new and pending acquire, then close all connections and the database. 
- * @returns {Promise} - */ - async close() { - if (this._closed) { - return; - } - this._closed = true; - const err = new Error("Pool is closed."); - for (const w of this._waiters) { - if (w.timer) clearTimeout(w.timer); - w.reject(err); - } - this._waiters.length = 0; - this._idle.length = 0; - for (const conn of this._allConnections) { - try { - await conn.close(); - } catch (_) { - // ignore - } - } - this._allConnections.length = 0; - if (this._database) { - try { - await this._database.close(); - } catch (_) { - // ignore - } - this._database = null; - } - } -} - -/** - * Create a connection pool. One shared Database; up to maxSize Connection instances. - * @param {lbug.PoolOptions} options - * @returns {lbug.Pool} - */ -function createPool(options) { - return new Pool(options); -} - -module.exports = { createPool, Pool }; diff --git a/tools/nodejs_api/src_js/query_result.js b/tools/nodejs_api/src_js/query_result.js index 24cfe21cb5..6f6ec88e81 100644 --- a/tools/nodejs_api/src_js/query_result.js +++ b/tools/nodejs_api/src_js/query_result.js @@ -76,15 +76,6 @@ class QueryResult { return this._queryResult.getNextSync(); } - /** - * Return the query result as a string (header + rows). For failed queries returns the error message. - * @returns {string} - */ - toString() { - this._checkClosed(); - return this._queryResult.toStringSync(); - } - /** * Iterate through the query result with callback functions. * @param {Function} resultCallback the callback function that is called for each row of the query result. @@ -297,16 +288,13 @@ class QueryResult { } /** - * Internal function to check if the query result or its connection is closed. - * @throws {Error} if the query result is closed or the connection is closed. + * Internal function to check if the query result is closed. + * @throws {Error} if the query result is closed. 
*/ _checkClosed() { if (this._isClosed) { throw new Error("Query result is closed."); } - if (this._connection._isClosed) { - throw new Error("Connection is closed."); - } } } diff --git a/tools/nodejs_api/test/common.js b/tools/nodejs_api/test/common.js index 036a2bd393..ab0a954842 100644 --- a/tools/nodejs_api/test/common.js +++ b/tools/nodejs_api/test/common.js @@ -1,39 +1,8 @@ -const nodeAssert = require("node:assert"); -const fs = require("fs/promises"); -const path = require("path"); -const os = require("os"); - -// Chai-like API on top of node:assert for minimal test changes -const assert = Object.create(nodeAssert); -assert.exists = (val, msg) => nodeAssert.ok(val != null, msg || "expected value to exist"); -assert.notExists = (val, msg) => nodeAssert.ok(val == null, msg || "expected value to not exist"); -assert.isNull = (val, msg) => nodeAssert.strictEqual(val, null, msg); -assert.isNotNull = (val, msg) => nodeAssert.notStrictEqual(val, null, msg); -assert.isTrue = (val, msg) => nodeAssert.strictEqual(val, true, msg); -assert.isFalse = (val, msg) => nodeAssert.strictEqual(val, false, msg); -assert.include = (container, value, msg) => - nodeAssert.ok(container.includes(value), msg || `expected ${container} to include ${value}`); -assert.isEmpty = (val, msg) => - nodeAssert.strictEqual(val.length, 0, msg || `expected empty, got length ${val.length}`); -assert.instanceOf = (obj, Ctor, msg) => - nodeAssert.ok(obj instanceof Ctor, msg || `expected instance of ${Ctor.name}`); -assert.isNumber = (val, msg) => - nodeAssert.strictEqual(typeof val, "number", msg); -assert.isString = (val, msg) => - nodeAssert.strictEqual(typeof val, "string", msg); -assert.isAtLeast = (n, min, msg) => - nodeAssert.ok(n >= min, msg || `expected ${n} >= ${min}`); -assert.lengthOf = (arr, n, msg) => - nodeAssert.strictEqual(arr.length, n, msg || `expected length ${n}, got ${arr.length}`); -assert.equal = (a, b, msg) => nodeAssert.strictEqual(a, b, msg); -assert.notEqual = (a, b, msg) => nodeAssert.notStrictEqual(a, b, msg); -assert.deepEqual = (a, b, msg) => nodeAssert.deepStrictEqual(a, b, msg); -assert.approximately = (actual, expected, delta, msg) => - nodeAssert.ok( - Math.abs(actual - expected) <= delta, - msg || `expected ${actual} to be approximately ${expected} (±${delta})` - ); -global.assert = assert; +global.chai = require("chai"); +global.assert = chai.assert; +global.expect = chai.expect; +chai.should(); +chai.config.includeStack = true; const TEST_INSTALLED = process.env.TEST_INSTALLED || false; if (TEST_INSTALLED) { @@ -46,10 +15,19 @@ if (TEST_INSTALLED) { console.log("Testing locally built version @", lbugPath); } -// Temp dir: os.tmpdir() respects TMPDIR (Unix) and TEMP/TMP (Windows). XDG spec -// does not define a temp directory; industry practice is TMPDIR + mkdtemp (unique names). 
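+// The tmp package creates a unique temporary directory; unsafeCleanup lets
+// its cleanup hook remove the directory even when it still contains files.
+// Its callback-style API is wrapped in a Promise inside initTests() below.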
+const tmp = require("tmp"); +const fs = require("fs/promises"); +const path = require("path"); const initTests = async () => { - const tmpPath = await fs.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); + const dbPath = path.join(tmpPath, "db.kz"); const db = new lbug.Database(dbPath, 1 << 28 /* 256MB */); const conn = new lbug.Connection(db, 4); @@ -70,13 +48,16 @@ const initTests = async () => { .split("\n"); const dataFileExtension = ["csv", "parquet", "npy", "ttl", "nq", "json", "lbug_extension"]; - const dataFileRegex = new RegExp(`"([^"]+\\.(${dataFileExtension.join("|")}))"`, "gi"); + const dataFileRegex = new RegExp(`"([^"]+\\.(${dataFileExtension.join('|')}))"`, "gi"); for (const line of copy) { if (!line || line.trim().length === 0) { - continue; + continue; } + + // handle multiple data files in one line const statement = line.replace(dataFileRegex, `"${tinysnbDir}$1"`); + await conn.query(statement); } @@ -88,7 +69,6 @@ const initTests = async () => { ); global.dbPath = dbPath; - global.tmpPath = tmpPath; global.db = db; global.conn = conn; }; diff --git a/tools/nodejs_api/test/test.js b/tools/nodejs_api/test/test.js index ccafe80d83..4efa4b7e6e 100644 --- a/tools/nodejs_api/test/test.js +++ b/tools/nodejs_api/test/test.js @@ -1,17 +1,8 @@ -const { describe, it, before, after } = require("node:test"); -global.describe = describe; -global.it = it; -global.before = before; -global.after = after; - require("./common.js"); -const path = require("path"); -const fs = require("fs/promises"); - -const importTest = (name, p) => { +const importTest = (name, path) => { describe(name, () => { - require(p); + require(path); }); }; @@ -19,27 +10,12 @@ describe("lbug", () => { before(() => { return initTests(); }); - after(async () => { - if (global.conn && !global.conn._isClosed) { - await global.conn.close().catch(() => {}); - } - if (global.db && !global.db._isClosed) { - await global.db.close().catch(() => {}); - } - if (global.tmpPath) { - await fs.rm(global.tmpPath, { recursive: true }).catch(() => {}); - } - // Native addon may keep the event loop alive; force exit so process doesn't hang - process.exit(0); - }); - importTest("Database", path.join(__dirname, "test_database.js")); - importTest("Connection", path.join(__dirname, "test_connection.js")); - importTest("Query result", path.join(__dirname, "test_query_result.js")); - importTest("Data types", path.join(__dirname, "test_data_type.js")); - importTest("Query parameters", path.join(__dirname, "test_parameter.js")); - importTest("Concurrent query execution", path.join(__dirname, "test_concurrency.js")); - importTest("Version", path.join(__dirname, "test_version.js")); - importTest("Synchronous API", path.join(__dirname, "test_sync_api.js")); - importTest("registerStream / LOAD FROM stream", path.join(__dirname, "test_register_stream.js")); - importTest("Resilience (close during/after use)", path.join(__dirname, "test_resilience.js")); + importTest("Database", "./test_database.js"); + importTest("Connection", "./test_connection.js"); + importTest("Query result", "./test_query_result.js"); + importTest("Data types", "./test_data_type.js"); + importTest("Query parameters", "./test_parameter.js"); + importTest("Concurrent query execution", "./test_concurrency.js"); + importTest("Version", "./test_version.js"); + importTest("Synchronous API", 
"./test_sync_api.js"); }); diff --git a/tools/nodejs_api/test/test_concurrency.js b/tools/nodejs_api/test/test_concurrency.js index fc611f10f9..8b70776036 100644 --- a/tools/nodejs_api/test/test_concurrency.js +++ b/tools/nodejs_api/test/test_concurrency.js @@ -1,3 +1,5 @@ +const { assert } = require("chai"); + describe("Concurrent query execution within a single connection", function () { it("should dispatch multiple queries concurrently with query strings", async function () { const queryResults = await Promise.all([ @@ -45,36 +47,30 @@ describe("Concurrent query execution across multiple connections", function () { for (let i = 0; i < 5; i++) { connections.push(new lbug.Connection(db)); } - try { - const queryResults = await Promise.all([ - connections[0].query( - "MATCH (a:person) WHERE a.ID = 0 RETURN a.isStudent;" - ), - connections[1].query( - "MATCH (a:person) WHERE a.ID = 2 RETURN a.isStudent;" - ), - connections[2].query( - "MATCH (a:person) WHERE a.ID = 3 RETURN a.isStudent;" - ), - connections[3].query( - "MATCH (a:person) WHERE a.ID = 5 RETURN a.isStudent;" - ), - connections[4].query( - "MATCH (a:person) WHERE a.ID = 7 RETURN a.isStudent;" - ), - ]); - const results = await Promise.all( - queryResults.map((queryResult) => queryResult.getAll()) - ); - assert.isTrue(results[0][0]["a.isStudent"]); - assert.isTrue(results[1][0]["a.isStudent"]); - assert.isFalse(results[2][0]["a.isStudent"]); - assert.isFalse(results[3][0]["a.isStudent"]); - assert.isFalse(results[4][0]["a.isStudent"]); - } finally { - for (const c of connections) { - if (!c._isClosed) await c.close().catch(() => {}); - } - } + const queryResults = await Promise.all([ + connections[0].query( + "MATCH (a:person) WHERE a.ID = 0 RETURN a.isStudent;" + ), + connections[1].query( + "MATCH (a:person) WHERE a.ID = 2 RETURN a.isStudent;" + ), + connections[2].query( + "MATCH (a:person) WHERE a.ID = 3 RETURN a.isStudent;" + ), + connections[3].query( + "MATCH (a:person) WHERE a.ID = 5 RETURN a.isStudent;" + ), + connections[4].query( + "MATCH (a:person) WHERE a.ID = 7 RETURN a.isStudent;" + ), + ]); + const results = await Promise.all( + queryResults.map((queryResult) => queryResult.getAll()) + ); + assert.isTrue(results[0][0]["a.isStudent"]); + assert.isTrue(results[1][0]["a.isStudent"]); + assert.isFalse(results[2][0]["a.isStudent"]); + assert.isFalse(results[3][0]["a.isStudent"]); + assert.isFalse(results[4][0]["a.isStudent"]); }); }); diff --git a/tools/nodejs_api/test/test_connection.js b/tools/nodejs_api/test/test_connection.js index b60ca13cb2..8cd92bf4e4 100644 --- a/tools/nodejs_api/test/test_connection.js +++ b/tools/nodejs_api/test/test_connection.js @@ -1,3 +1,5 @@ +const { assert } = require("chai"); + describe("Connection constructor", function () { it("should create a connection with a valid database object", async function () { const connection = new lbug.Connection(db); @@ -7,7 +9,6 @@ describe("Connection constructor", function () { assert.exists(connection._connection); assert.isTrue(connection._isInitialized); assert.notExists(connection._initPromise); - await connection.close(); }); it("should throw error if the database object is invalid", async function () { @@ -229,8 +230,8 @@ describe("Query", function () { describe("Timeout", function () { it("should abort a query if the timeout is reached", async function () { - const newConn = new lbug.Connection(db); try { + const newConn = new lbug.Connection(db); await newConn.init(); newConn.setQueryTimeout(1); await newConn.query( @@ -239,14 +240,12 @@ 
describe("Timeout", function () { assert.fail("No error thrown when the query times out."); } catch (err) { assert.equal(err.message, "Interrupted."); - } finally { - if (!newConn._isClosed) await newConn.close().catch(() => {}); } }); it("should allow setting a timeout before the connection is initialized", async function () { - const newConn = new lbug.Connection(db); try { + const newConn = new lbug.Connection(db); newConn.setQueryTimeout(1); await newConn.init(); await newConn.query( @@ -255,81 +254,8 @@ describe("Timeout", function () { assert.fail("No error thrown when the query times out."); } catch (err) { assert.equal(err.message, "Interrupted."); - } finally { - if (!newConn._isClosed) await newConn.close().catch(() => {}); - } - }); -}); - -describe("Interrupt", function () { - it("should abort a long-running query when interrupt() is called", { timeout: 5000 }, async function () { - if (process.platform === "win32") { - this.skip(); - } - const newConn = new lbug.Connection(db); - try { - await newConn.init(); - const longQuery = - "UNWIND RANGE(1, 30000) AS x UNWIND RANGE(1, 30000) AS y RETURN COUNT(x + y);"; - const queryPromise = newConn.query(longQuery); - setTimeout(() => newConn.interrupt(), 100); - try { - await queryPromise; - assert.fail("No error thrown when the query was interrupted."); - } catch (err) { - assert.equal(err.message, "Interrupted."); - } - } finally { - if (!newConn._isClosed) await newConn.close().catch(() => {}); - } - }); -}); - -describe("AbortSignal", function () { - it("should reject with AbortError when signal is already aborted before query starts", async function () { - const ac = new AbortController(); - ac.abort(); - try { - await conn.query("RETURN 1", { signal: ac.signal }); - assert.fail("No error thrown when signal was already aborted."); - } catch (err) { - assert.equal(err.name, "AbortError"); - assert.equal(err.message, "The operation was aborted."); } }); - - it("should reject with AbortError when signal is aborted during query", async function () { - const newConn = new lbug.Connection(db); - try { - await newConn.init(); - const ac = new AbortController(); - const longQuery = - "UNWIND RANGE(1, 30000) AS x UNWIND RANGE(1, 30000) AS y RETURN COUNT(x + y);"; - const queryPromise = newConn.query(longQuery, { signal: ac.signal }); - setTimeout(() => ac.abort(), 100); - try { - await queryPromise; - assert.fail("No error thrown when signal was aborted during query."); - } catch (err) { - assert.equal(err.name, "AbortError"); - } - } finally { - if (!newConn._isClosed) await newConn.close().catch(() => {}); - } - }); - - it("should work with progressCallback in options object", async function () { - let progressCalled = false; - const result = await conn.query("RETURN 1", { - progressCallback: () => { - progressCalled = true; - }, - }); - assert.exists(result); - const rows = Array.isArray(result) ? 
result : [result];
-    assert.isAtLeast(rows.length, 1);
-    rows.forEach((r) => r.close());
-  });
-});
 
 describe("Close", function () {
diff --git a/tools/nodejs_api/test/test_data_type.js b/tools/nodejs_api/test/test_data_type.js
index b42ba44153..68aa682516 100644
--- a/tools/nodejs_api/test/test_data_type.js
+++ b/tools/nodejs_api/test/test_data_type.js
@@ -1,3 +1,4 @@
+const { assert } = require("chai");
 const EPSILON = 1e-6;
 
 describe("BOOL", function () {
@@ -379,8 +380,8 @@ describe("LIST", function () {
     assert.equal(result[0]["a.courseScoresPerTerm"].length, 2);
     assert.equal(result[0]["a.courseScoresPerTerm"][0].length, 2);
     assert.equal(result[0]["a.courseScoresPerTerm"][1].length, 3);
-    assert.deepEqual(result[0]["a.courseScoresPerTerm"][0], [10, 8]);
-    assert.deepEqual(result[0]["a.courseScoresPerTerm"][1], [6, 7, 8]);
+    assert.deepEqual(result[0]["a.courseScoresPerTerm"][0], [10, 8]);
+    assert.deepEqual(result[0]["a.courseScoresPerTerm"][1], [6, 7, 8]);
   });
 });
 
@@ -450,7 +451,7 @@ describe("NODE", function () {
     );
     assert.deepEqual(result["courseScoresPerTerm"][0], [10, 8]);
     assert.deepEqual(result["courseScoresPerTerm"][1], [6, 7, 8]);
-    assert.deepEqual(result["usedNames"], ["Aida"]);
+    assert.equal(result["usedNames"], "Aida");
     assert.equal(result["_id"]["offset"], 0);
     assert.equal(result["_id"]["table"], 0);
   });
diff --git a/tools/nodejs_api/test/test_database.js b/tools/nodejs_api/test/test_database.js
index 0235347040..06ba4c24f4 100644
--- a/tools/nodejs_api/test/test_database.js
+++ b/tools/nodejs_api/test/test_database.js
@@ -1,25 +1,23 @@
+const { assert } = require("chai");
+const tmp = require("tmp");
 const process = require("process");
 const path = require("path");
 const fs = require("fs");
-const fsp = require("fs/promises");
-const os = require("os");
 
-const { spawn } = require("child_process");
+const spawn = require("child_process").spawn;
 
 const openDatabaseOnSubprocess = (dbPath) => {
   return new Promise((resolve, _) => {
     const node = process.argv[0];
-    // Use env vars so Windows paths with backslashes don't break the -e code string
-    const env = { ...process.env, LBUG_PATH: lbugPath, DB_PATH: dbPath };
     const code = `
     (async() => {
-      const lbug = require(process.env.LBUG_PATH);
-      const db = new lbug.Database(process.env.DB_PATH, 1 << 28);
+      const lbug = require("${lbugPath}");
+      const db = new lbug.Database("${dbPath}", 1 << 28);
       await db.init();
       console.log("Database initialized.");
     })();
     `;
-    const child = spawn(node, ["-e", code], { env });
+    const child = spawn(node, ["-e", code]);
     let stdout = "";
     let stderr = "";
     child.stdout.on("data", (data) => {
@@ -36,7 +34,14 @@ const openDatabaseOnSubprocess = (dbPath) => {
 
 describe("Database constructor", function () {
   it("should create a database with a valid path and buffer size", async function () {
-    const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-"));
+    const tmpDbPath = await new Promise((resolve, reject) => {
+      tmp.dir({ unsafeCleanup: true }, (err, path, _) => {
+        if (err) {
+          return reject(err);
+        }
+        return resolve(path);
+      });
+    });
     const dbPath = path.join(tmpDbPath, "db.kz");
     const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */);
     assert.exists(testDb);
@@ -48,7 +53,14 @@ describe("Database constructor", function () {
   });
 
   it("should create a database with a valid path and no buffer size", async function () {
-    const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-"));
+    const tmpDbPath = await new Promise((resolve, reject) => {
+      tmp.dir({
unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath); assert.exists(testDb); @@ -58,18 +70,22 @@ describe("Database constructor", function () { assert.isTrue(testDb._isInitialized); assert.notExists(testDb._initPromise); - const testConn = new lbug.Connection(testDb); - const res = await testConn.query("CALL current_setting('checkpoint_threshold') RETURN *"); + // check default config + let res = await conn.query("CALL current_setting('checkpoint_threshold') RETURN *"); assert.equal(res.getNumTuples(), 1); const tuple = await res.getNext(); - assert.isTrue(Number(tuple["checkpoint_threshold"]) > 0); - res.close(); - testConn.close(); - testDb.close(); + assert.isTrue(tuple["checkpoint_threshold"] > 0); }); it("should create a database with auto checkpoint configured", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */, @@ -89,7 +105,14 @@ describe("Database constructor", function () { }); it("should create a database with checkpoint threshold configured", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */, @@ -103,14 +126,21 @@ describe("Database constructor", function () { let res = await conn.query("CALL current_setting('checkpoint_threshold') RETURN *"); assert.equal(res.getNumTuples(), 1); const tuple = await res.getNext(); - assert.equal(Number(tuple["checkpoint_threshold"]), 1234); + assert.equal(tuple["checkpoint_threshold"], 1234); res.close(); conn.close(); testDb.close(); }); it("should create a database with throwOnWalReplayFailure configured", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const walPath = dbPath + ".wal"; fs.writeFileSync(walPath, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); @@ -134,7 +164,14 @@ describe("Database constructor", function () { }); it("should create a database with enableChecksums configured", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); let testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */, @@ -174,7 +211,14 @@ describe("Database constructor", function () { }); it("should create a database in read-only mode", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ 
unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); assert.exists(testDb); @@ -219,7 +263,14 @@ describe("Database constructor", function () { }); it("should create a database with a valid max DB size", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database( dbPath, @@ -323,10 +374,22 @@ describe("Database constructor", function () { describe("Database close", function () { it("should allow initializing a new database after closing", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + if (process.platform === "win32") { + this._runnable.title += " (skipped: not implemented on Windows)"; + this.skip(); + } + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); + // FIXME: doesn't work properly on windows let subProcessResult = await openDatabaseOnSubprocess(dbPath); assert.notEqual(subProcessResult.code, 0); assert.include( @@ -341,7 +404,14 @@ describe("Database close", function () { }); it("should throw error if the database is closed", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); @@ -355,7 +425,14 @@ describe("Database close", function () { }); it("should close the database if it is initialized", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); @@ -367,7 +444,14 @@ describe("Database close", function () { }); it("should close the database if it is not initialized", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); assert.isFalse(testDb._isInitialized); @@ -378,7 +462,14 @@ describe("Database close", function () { }); it("should close a initializing database", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) 
=> { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await Promise.all([testDb.init(), testDb.close()]); @@ -388,7 +479,14 @@ describe("Database close", function () { }); it("should gracefully close a database multiple times", async function () { - const tmpDbPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); + const tmpDbPath = await new Promise((resolve, reject) => { + tmp.dir({ unsafeCleanup: true }, (err, path, _) => { + if (err) { + return reject(err); + } + return resolve(path); + }); + }); const dbPath = path.join(tmpDbPath, "db.kz"); const testDb = new lbug.Database(dbPath, 1 << 28 /* 256MB */); await testDb.init(); @@ -409,17 +507,9 @@ describe("Database close", function () { assert.deepEqual(tuple, { "+(1,1)": 2 }); testDb.closeSync(); assert.isTrue(testDb._isClosed); - assert.throws( - () => conn.querySync("RETURN 1+1"), - Error, - /(Runtime exception:.*parent database is closed|Connection is closed\.)/ - ); + assert.throws(() => conn.querySync("RETURN 1+1"), Error, "Runtime exception: The current operation is not allowed because the parent database is closed."); conn.closeSync(); assert.isTrue(conn._isClosed); - assert.throws( - () => res.resetIterator(), - Error, - /(Runtime exception:.*parent database is closed|Connection is closed\.)/ - ); + assert.throws(() => res.resetIterator(), Error, "Runtime exception: The current operation is not allowed because the parent database is closed."); }); }); diff --git a/tools/nodejs_api/test/test_parameter.js b/tools/nodejs_api/test/test_parameter.js index 8398cdbcaf..1b2815aa33 100644 --- a/tools/nodejs_api/test/test_parameter.js +++ b/tools/nodejs_api/test/test_parameter.js @@ -1,3 +1,5 @@ +const { assert } = require("chai"); + describe("BOOL", function () { it("should transform booleans as BOOL parameter", async function () { const preparedStatement = await conn.prepare( @@ -71,7 +73,7 @@ describe("UINT64", function () { 1: 10000000000000000000, }); const result = await queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT64)"], 10000000000000000000); + assert.equal(result[0]["CAST($1, UINT64)"], "10000000000000000000"); }); }); @@ -82,7 +84,7 @@ describe("UINT32", function () { 1: 4294967295, }); const result = await queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT32)"], 4294967295); + assert.equal(result[0]["CAST($1, UINT32)"], "4294967295"); }); }); @@ -93,7 +95,7 @@ describe("UINT16", function () { 1: 65535, }); const result = await queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT16)"], 65535); + assert.equal(result[0]["CAST($1, UINT16)"], "65535"); }); }); @@ -104,7 +106,7 @@ describe("UINT8", function () { 1: 255, }); const result = await queryResult.getAll(); - assert.equal(result[0]["CAST($1, UINT8)"], 255); + assert.equal(result[0]["CAST($1, UINT8)"], "255"); }); }); diff --git a/tools/nodejs_api/test/test_pool.js b/tools/nodejs_api/test/test_pool.js deleted file mode 100644 index 35f9f54e1d..0000000000 --- a/tools/nodejs_api/test/test_pool.js +++ /dev/null @@ -1,161 +0,0 @@ -require("./common.js"); -const path = require("path"); -const fsp = require("fs/promises"); -const os = require("os"); - -describe("Connection pool", function () { - let pool; - let tmpDir; - - before(async function () { - await initTests(); - tmpDir = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-pool-")); - }); - - after(async function () { - if (tmpDir) await fsp.rm(tmpDir, { 
recursive: true }).catch(() => {}); - }); - - afterEach(async function () { - if (pool && !pool._closed) { - await pool.close(); - } - }); - - it("createPool requires maxSize", function () { - assert.throws(() => lbug.createPool({}), /maxSize/); - assert.throws(() => lbug.createPool({ databasePath: ":memory:" }), /maxSize/); - assert.doesNotThrow(() => lbug.createPool({ maxSize: 5 })); - }); - - it("pool.run(fn) runs with a connection and releases on success", async function () { - pool = lbug.createPool({ - databasePath: path.join(tmpDir, "p1.kz"), - maxSize: 2, - databaseOptions: { bufferManagerSize: 1 << 24 }, - }); - const result = await pool.run(async (conn) => { - const r = await conn.query("RETURN 1 AS x"); - const rows = await r.getAll(); - r.close(); - return rows; - }); - assert.lengthOf(result, 1); - assert.strictEqual(result[0].x, 1); - }); - - it("pool.run(fn) releases on throw", async function () { - pool = lbug.createPool({ - databasePath: path.join(tmpDir, "p2.kz"), - maxSize: 2, - databaseOptions: { bufferManagerSize: 1 << 24 }, - }); - let err; - try { - await pool.run(async () => { - throw new Error("fail"); - }); - } catch (e) { - err = e; - } - assert.instanceOf(err, Error); - assert.include(err.message, "fail"); - const again = await pool.run(async (conn) => { - const r = await conn.query("RETURN 2 AS y"); - const rows = await r.getAll(); - r.close(); - return rows; - }); - assert.lengthOf(again, 1); - assert.strictEqual(again[0].y, 2); - }); - - it("acquire/release and multiple concurrent cycles", async function () { - pool = lbug.createPool({ - databasePath: path.join(tmpDir, "p3.kz"), - maxSize: 3, - databaseOptions: { bufferManagerSize: 1 << 24 }, - }); - const conn1 = await pool.acquire(); - const conn2 = await pool.acquire(); - const conn3 = await pool.acquire(); - const r1 = await conn1.query("RETURN 1 AS a"); - const r2 = await conn2.query("RETURN 2 AS b"); - const r3 = await conn3.query("RETURN 3 AS c"); - assert.strictEqual((await r1.getAll())[0].a, 1); - assert.strictEqual((await r2.getAll())[0].b, 2); - assert.strictEqual((await r3.getAll())[0].c, 3); - r1.close(); - r2.close(); - r3.close(); - pool.release(conn1); - pool.release(conn2); - pool.release(conn3); - const conn4 = await pool.acquire(); - const r4 = await conn4.query("RETURN 4 AS d"); - assert.strictEqual((await r4.getAll())[0].d, 4); - r4.close(); - pool.release(conn4); - }); - - it("pool does not exceed maxSize", async function () { - pool = lbug.createPool({ - databasePath: path.join(tmpDir, "p4.kz"), - maxSize: 2, - databaseOptions: { bufferManagerSize: 1 << 24 }, - }); - const c1 = await pool.acquire(); - const c2 = await pool.acquire(); - let resolved = false; - const p3 = pool.acquire().then((c) => { - resolved = true; - pool.release(c); - }); - await new Promise((r) => setImmediate(r)); - assert.isFalse(resolved); - pool.release(c1); - await p3; - assert.isTrue(resolved); - pool.release(c2); - }); - - it("acquire() rejects after acquireTimeoutMillis when no connection available", async function () { - pool = lbug.createPool({ - databasePath: path.join(tmpDir, "p5a.kz"), - maxSize: 1, - acquireTimeoutMillis: 80, - databaseOptions: { bufferManagerSize: 1 << 24 }, - }); - const c1 = await pool.acquire(); - let timeoutErr; - try { - await pool.acquire(); - } catch (e) { - timeoutErr = e; - } - assert.instanceOf(timeoutErr, Error); - assert.include(timeoutErr.message, "timed out"); - pool.release(c1); - }); - - it("pool.close() prevents new acquire and closes all", async function () { - pool 
= lbug.createPool({
-      databasePath: path.join(tmpDir, "p5.kz"),
-      maxSize: 2,
-      databaseOptions: { bufferManagerSize: 1 << 24 },
-    });
-    await pool.run(async (conn) => {
-      const r = await conn.query("RETURN 1");
-      r.close();
-    });
-    await pool.close();
-    let closedErr;
-    try {
-      await pool.acquire();
-    } catch (e) {
-      closedErr = e;
-    }
-    assert.instanceOf(closedErr, Error);
-    assert.include(closedErr.message, "closed");
-  });
-});
diff --git a/tools/nodejs_api/test/test_query_result.js b/tools/nodejs_api/test/test_query_result.js
index 5d384c3939..dab58580b5 100644
--- a/tools/nodejs_api/test/test_query_result.js
+++ b/tools/nodejs_api/test/test_query_result.js
@@ -1,3 +1,5 @@
+const { assert } = require("chai");
+
 const PERSON_IDS = [0, 2, 3, 5, 7, 8, 9, 10];
 
 describe("Reset iterator", function () {
@@ -64,30 +66,22 @@ describe("Get next", function () {
     }
   });
 
-  it("should return null when no more tuples", async function () {
+  it("should throw an error if there is no next tuple", async function () {
     const queryResult = await conn.query(
       "MATCH (a:person) RETURN a.ID ORDER BY a.ID"
     );
     for (let i = 0; i < 8; ++i) {
       await queryResult.getNext();
     }
-    const exhausted = await queryResult.getNext();
-    assert.isNull(exhausted, "getNext() returns null when no more tuples");
-  });
-
-  it("getNext() returns null exactly when hasNext() is false", async function () {
-    const queryResult = await conn.query(
-      "MATCH (a:person) RETURN a.ID ORDER BY a.ID"
-    );
-    let count = 0;
-    while (queryResult.hasNext()) {
-      const row = await queryResult.getNext();
-      assert.isNotNull(row, "getNext() must return value when hasNext() is true");
-      count++;
+    try {
+      await queryResult.getNext();
+      assert.fail("No error thrown when there is no next tuple");
+    } catch (err) {
+      assert.equal(
+        err.message,
+        "Runtime exception: No more tuples in QueryResult, Please check hasNext() before calling getNext()."
+      );
     }
-    assert.equal(count, 8);
-    const afterExhausted = await queryResult.getNext();
-    assert.isNull(afterExhausted, "getNext() must return null when hasNext() was false");
   });
 });
 
@@ -187,7 +181,7 @@ describe("Get column data types", function () {
       p.courseScoresPerTerm`
     );
     const columnDataTypes = await queryResult.getColumnDataTypes();
-    const expectedResultArr = [
+    const expectedResultArr = [
       "INT64",
       "STRING",
       "BOOL",
@@ -198,7 +192,7 @@
       "INT64[]",
      "INT64[][]",
     ];
-    assert.deepEqual(columnDataTypes, expectedResultArr);
+    assert.deepEqual(columnDataTypes, expectedResultArr);
   });
 });
diff --git a/tools/nodejs_api/test/test_register_stream.js b/tools/nodejs_api/test/test_register_stream.js
deleted file mode 100644
index 6dea88b437..0000000000
--- a/tools/nodejs_api/test/test_register_stream.js
+++ /dev/null
@@ -1,79 +0,0 @@
-describe("registerStream / LOAD FROM stream", function () {
-  it("should LOAD FROM registered stream and return rows", async function () {
-    async function* rowSource() {
-      yield [1, "a"];
-      yield [2, "b"];
-      yield [3, "c"];
-    }
-    await conn.registerStream("mystream", rowSource(), {
-      columns: [
-        { name: "id", type: "INT64" },
-        { name: "label", type: "STRING" },
-      ],
-    });
-    try {
-      const result = await conn.query("LOAD FROM mystream RETURN *");
-      const rows = Array.isArray(result) ?
result : [result]; - assert.isAtLeast(rows.length, 1); - const r = rows[0]; - assert.isTrue(r.hasNext()); - const row1 = await r.getNext(); - assert.exists(row1); - assert.equal(row1["id"], 1); - assert.equal(row1["label"], "a"); - const row2 = await r.getNext(); - assert.equal(row2["id"], 2); - assert.equal(row2["label"], "b"); - const row3 = await r.getNext(); - assert.equal(row3["id"], 3); - assert.equal(row3["label"], "c"); - assert.isNull(await r.getNext()); - } finally { - conn.unregisterStream("mystream"); - } - }); - - it("should LOAD FROM stream with object rows (column order from schema)", async function () { - async function* objectRowSource() { - yield { id: 10, label: "x" }; - yield { label: "y", id: 20 }; - } - await conn.registerStream("objstream", objectRowSource(), { - columns: [ - { name: "id", type: "INT64" }, - { name: "label", type: "STRING" }, - ], - }); - try { - const result = await conn.query("LOAD FROM objstream RETURN *"); - const r = Array.isArray(result) ? result[0] : result; - const row1 = await r.getNext(); - assert.isNotNull(row1, "expected first row from stream"); - assert.equal(row1["id"], 10); - assert.equal(row1["label"], "x"); - const row2 = await r.getNext(); - assert.isNotNull(row2, "expected second row from stream"); - assert.equal(row2["id"], 20); - assert.equal(row2["label"], "y"); - assert.isNull(await r.getNext()); - } finally { - conn.unregisterStream("objstream"); - } - }); - - it("should unregisterStream by name", async function () { - async function* empty() { - if (false) yield []; - } - await conn.registerStream("tmpstream", empty(), { - columns: [{ name: "x", type: "INT64" }], - }); - conn.unregisterStream("tmpstream"); - try { - await conn.query("LOAD FROM tmpstream RETURN *"); - assert.fail("Expected error when loading from unregistered stream."); - } catch (e) { - assert.include(e.message, "not in scope"); - } - }); -}); diff --git a/tools/nodejs_api/test/test_resilience.js b/tools/nodejs_api/test/test_resilience.js deleted file mode 100644 index d53f3afa29..0000000000 --- a/tools/nodejs_api/test/test_resilience.js +++ /dev/null @@ -1,184 +0,0 @@ -"use strict"; - -const path = require("path"); -const fsp = require("fs/promises"); -const os = require("os"); - -/** - * Resilience tests: close connection/database during or after operations. - * Goal: no crashes (SIGSEGV, native abort); all failures must surface as JS errors. 
- */ -function withTempDb(fn) { - return async function () { - const tmpPath = await fsp.mkdtemp(path.join(os.tmpdir(), "lbug-")); - const dbPath = path.join(tmpPath, "db.kz"); - const testDb = new lbug.Database(dbPath, 1 << 26 /* 64MB */); - await testDb.init(); - const testConn = new lbug.Connection(testDb); - await testConn.init(); - try { - await fn.call(this, testDb, testConn); - } finally { - if (!testDb._isClosed) await testDb.close().catch(() => {}); - if (!testConn._isClosed) await testConn.close().catch(() => {}); - await fsp.rm(tmpPath, { recursive: true }).catch(() => {}); - } - }; -} - -describe("Resilience (close during/after use)", { timeout: 10000 }, function () { - it("query rejects when connection is closed while query is in flight", withTempDb(async (testDb, testConn) => { - const longQuery = "UNWIND range(1, 20000) AS x UNWIND range(1, 2000) AS y RETURN count(*)"; - const queryPromise = testConn.query(longQuery); - await new Promise((r) => setTimeout(r, 80)); - testConn.closeSync(); - const timeoutMs = 2000; - const timeoutPromise = new Promise((_, reject) => { - setTimeout(() => reject(new Error(`Expected query to reject within ${timeoutMs}ms when connection was closed (timed out).`)), timeoutMs); - }); - try { - await Promise.race([queryPromise, timeoutPromise]); - assert.fail("Expected query to reject when connection was closed during execution."); - } catch (err) { - if ((err.message || "").includes("timed out")) throw err; - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - const ok = msg.includes("closed") || msg.includes("not allowed") || msg.includes("runtime"); - assert.isTrue(ok, `Expected error about closed/not allowed, got: ${err.message}`); - } - })); - - // Database close is synchronous and blocks until in-flight work completes (core behavior). - // So we cannot observe "query rejects when database is closed" without a non-blocking close. 
- it.skip("query rejects when database is closed while query is in flight", withTempDb(async (testDb, testConn) => { - const longQuery = "UNWIND range(1, 20000) AS x UNWIND range(1, 2000) AS y RETURN count(*)"; - const queryPromise = testConn.query(longQuery); - await new Promise((r) => setTimeout(r, 120)); - testDb.closeSync(); - const timeoutMs = 5000; - const timeoutPromise = new Promise((_, reject) => { - setTimeout(() => reject(new Error(`Expected query to reject within ${timeoutMs}ms when database was closed (timed out).`)), timeoutMs); - }); - try { - await Promise.race([queryPromise, timeoutPromise]); - assert.fail("Expected query to reject when database was closed during execution."); - } catch (err) { - if ((err.message || "").includes("timed out")) throw err; - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - const ok = msg.includes("closed") || msg.includes("not allowed") || msg.includes("runtime"); - assert.isTrue(ok, `Expected error about closed/not allowed, got: ${err.message}`); - } - })); - - it("getNext() after connection closed throws and does not crash", withTempDb(async (testDb, testConn) => { - const res = await testConn.query("RETURN 1 AS x"); - const row = await res.getNext(); - assert.equal(row.x, 1); - testConn.closeSync(); - try { - await res.getNext(); - assert.fail("Expected getNext() to throw after connection closed."); - } catch (err) { - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); - } - })); - - it("hasNext() after connection closed throws and does not crash", withTempDb(async (testDb, testConn) => { - const res = await testConn.query("RETURN 1 AS x"); - assert.isTrue(res.hasNext()); - testConn.closeSync(); - try { - res.hasNext(); - assert.fail("Expected hasNext() to throw after connection closed."); - } catch (err) { - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); - } - })); - - it("getNext() after database closed throws and does not crash", withTempDb(async (testDb, testConn) => { - const res = await testConn.query("RETURN 1 AS x"); - await res.getNext(); - testDb.closeSync(); - try { - await res.getNext(); - assert.fail("Expected getNext() to throw after database closed."); - } catch (err) { - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); - } - })); - - it("hasNext() after database closed throws and does not crash", withTempDb(async (testDb, testConn) => { - const res = await testConn.query("RETURN 1 AS x"); - testDb.closeSync(); - try { - res.hasNext(); - assert.fail("Expected hasNext() to throw after database closed."); - } catch (err) { - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); - } - })); - - it("registerStream then close connection then query throws before running", withTempDb(async (testDb, testConn) => { - async function* gen() { - yield [1]; - } - await testConn.registerStream("s", gen(), { columns: [{ name: "x", type: "INT64" }] }); - testConn.closeSync(); - try { - await 
testConn.query("LOAD FROM s RETURN *"); - assert.fail("Expected query to throw when connection is already closed."); - } catch (err) { - assert.instanceOf(err, Error); - assert.include((err.message || "").toLowerCase(), "closed"); - } - })); - - it("close connection while iterating result: second getNext throws", withTempDb(async (testDb, testConn) => { - const res = await testConn.query("UNWIND [1,2,3] AS x RETURN x"); - const a = await res.getNext(); - assert.equal(a.x, 1); - testConn.closeSync(); - try { - await res.getNext(); - assert.fail("Expected getNext() to throw after connection closed mid-iteration."); - } catch (err) { - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); - } - })); - - it("query after connection closed throws immediately (no native call)", async function () { - const testConn = new lbug.Connection(db); - await testConn.init(); - await testConn.close(); - try { - await testConn.query("RETURN 1"); - assert.fail("Expected query to throw when connection is closed."); - } catch (err) { - assert.equal(err.message, "Connection is closed."); - } - }); - - it("getNextSync after database closed throws", withTempDb(async (testDb, testConn) => { - const res = await testConn.query("RETURN 1 AS x"); - testDb.closeSync(); - try { - res.getNextSync(); - assert.fail("Expected getNextSync() to throw after database closed."); - } catch (err) { - assert.instanceOf(err, Error); - const msg = (err.message || "").toLowerCase(); - assert.isTrue(msg.includes("closed") || msg.includes("not allowed"), `Expected closed/not allowed, got: ${err.message}`); - } - })); -}); diff --git a/tools/nodejs_api/test/test_sync_api.js b/tools/nodejs_api/test/test_sync_api.js index be1e62f7b9..25667227e3 100644 --- a/tools/nodejs_api/test/test_sync_api.js +++ b/tools/nodejs_api/test/test_sync_api.js @@ -1,3 +1,5 @@ +const { assert } = require("chai"); + const PERSON_IDS = [0, 2, 3, 5, 7, 8, 9, 10]; describe("Query execution", function () { diff --git a/tools/nodejs_api/test/test_version.js b/tools/nodejs_api/test/test_version.js index 7057c15084..f59df235d0 100644 --- a/tools/nodejs_api/test/test_version.js +++ b/tools/nodejs_api/test/test_version.js @@ -1,3 +1,5 @@ +const { assert } = require("chai"); + describe("Get version", function () { it("should get the version of the library", function () { assert.isString(lbug.VERSION);