diff --git a/.github/agents/copilot-instructions.md b/.github/agents/copilot-instructions.md index 3843b2d41..0453f363c 100644 --- a/.github/agents/copilot-instructions.md +++ b/.github/agents/copilot-instructions.md @@ -1,6 +1,6 @@ # KalamDB Development Guidelines -Auto-generated from all feature plans. Last updated: 2026-04-20 +Auto-generated from all feature plans. Last updated: 2026-05-06 ## Active Technologies - Rust 1.90+ (edition 2021) + DataFusion 40.0, Apache Arrow 52.0, RocksDB 0.24, Actix-Web 4.4, DashMap 5, serde 1.0, tokio 1.48 (027-pg-transactions) @@ -9,6 +9,8 @@ Auto-generated from all feature plans. Last updated: 2026-04-20 - RocksDB-backed `system.users` via `IndexedEntityStore`; broader platform storage remains RocksDB + Parquet through existing abstractions (028-auth-integration) - Rust 1.92+ (edition 2021) across backend crates and CLI + DataFusion 53.1.0 (`datafusion`, `datafusion-datasource`, `datafusion-common`, `datafusion-expr`), Arrow 58.1.0, Parquet 58.1.0, object_store 0.13.2, tokio 1.51, RocksDB 0.24, Actix-Web 4.13, moka plan cache (029-datafusion-modernization) - RocksDB hot path plus manifest-directed Parquet cold storage via `kalamdb-filestore`, `StorageCached`, and `ManifestAccessPlanner` (029-datafusion-modernization) +- TypeScript 6.0.x, React 19.2, Node.js 18+ for package build/tes + React 19, React DOM 19, `@kalamdb/client`, `@kalamdb/orm`, `drizzle-orm`, Vitest, React Testing Library (030-react-live-queries) +- Existing KalamDB HTTP/WebSocket APIs via `@kalamdb/client`; no new persistent storage (030-react-live-queries) - Rust 1.92+ (edition 2021) for backend and PostgreSQL extension crates + DataFusion 40.0, Apache Arrow 52.0, Apache Parquet 52.0, RocksDB 0.24, Actix-Web 4.4, tonic/prost for pg RPC transport, DashMap for concurrent registries (027-pg-transactions) @@ -28,9 +30,9 @@ cargo test [ONLY COMMANDS FOR ACTIVE TECHNOLOGIES][ONLY COMMANDS FOR ACTIVE TECH Rust 1.92+ (edition 2021) for backend and PostgreSQL extension crates: Follow standard conventions ## Recent Changes +- 030-react-live-queries: Added TypeScript 6.0.x, React 19.2, Node.js 18+ for package build/tes + React 19, React DOM 19, `@kalamdb/client`, `@kalamdb/orm`, `drizzle-orm`, Vitest, React Testing Library - 029-datafusion-modernization: Added Rust 1.92+ (edition 2021) across backend crates and CLI + DataFusion 53.1.0 (`datafusion`, `datafusion-datasource`, `datafusion-common`, `datafusion-expr`), Arrow 58.1.0, Parquet 58.1.0, object_store 0.13.2, tokio 1.51, RocksDB 0.24, Actix-Web 4.13, moka plan cache - 028-auth-integration: Added Rust 1.92+ (edition 2021) for backend, CLI, link-common, and Dart bridge; TypeScript/JavaScript ES2020+ and Dart only for downstream contract consumers and docs + Actix-Web 4.4, jsonwebtoken 9.2, kalamdb-auth OIDC/JWKS validator, kalamdb-commons typed models, kalamdb-store IndexedEntityStore, tokio, serde, link-common, flutter_rust_bridge bridge models -- 027-pg-transactions: Added Rust 1.90+ (edition 2021) + DataFusion 40.0, Apache Arrow 52.0, RocksDB 0.24, Actix-Web 4.4, DashMap 5, serde 1.0, tokio 1.48 diff --git a/.github/workflows/dart-sdk.yml b/.github/workflows/dart-sdk.yml new file mode 100644 index 000000000..714072011 --- /dev/null +++ b/.github/workflows/dart-sdk.yml @@ -0,0 +1,364 @@ +name: Dart SDK + +on: + push: + branches: [main, dev] + paths: + - 'link/sdks/dart/**' + - 'link/kalam-link-dart/**' + - 'link/link-common/**' + - 'backend/server.example.toml' + - 'versions.json' + - 'scripts/versions.py' + - '.github/workflows/dart-sdk.yml' 
+ pull_request: + paths: + - 'link/sdks/dart/**' + - 'link/kalam-link-dart/**' + - 'link/link-common/**' + - 'backend/server.example.toml' + - 'versions.json' + - 'scripts/versions.py' + - '.github/workflows/dart-sdk.yml' + workflow_dispatch: + inputs: + publish: + description: "Publish Dart SDK to pub.dev" + type: boolean + required: false + default: false + workflow_call: + inputs: + publish: + description: "Publish Dart SDK to pub.dev" + type: boolean + required: false + default: false + +permissions: + contents: write + +env: + CARGO_TERM_COLOR: always + RUST_VERSION: "1.92.0" + RUSTC_WRAPPER: "" + CARGO_BUILD_RUSTC_WRAPPER: "" + +jobs: + resolve_versions: + name: Resolve Dart SDK Version + runs-on: ubuntu-latest + outputs: + dart_version: ${{ steps.versions.outputs.dart_version }} + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Verify versions.json + shell: bash + run: | + set -euo pipefail + python3 scripts/versions.py verify + + - name: Resolve versions from versions.json + id: versions + shell: bash + run: | + set -euo pipefail + python3 scripts/versions.py github-outputs --github-output "$GITHUB_OUTPUT" --repository "$GITHUB_REPOSITORY" + + test: + name: Dart SDK Tests + runs-on: ubuntu-latest + needs: resolve_versions + outputs: + total: ${{ steps.parse_badge.outputs.total }} + passed: ${{ steps.parse_badge.outputs.passed }} + failed: ${{ steps.parse_badge.outputs.failed }} + message: ${{ steps.parse_badge.outputs.message }} + color: ${{ steps.parse_badge.outputs.color }} + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Install system dependencies + shell: bash + run: | + sudo apt-get update + sudo apt-get install -y --no-install-recommends \ + clang libclang-dev pkg-config libssl-dev + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + + - name: Cache Rust (dart bridge) + uses: Swatinem/rust-cache@v2 + with: + shared-key: dart-bridge + cache-on-failure: true + workspaces: link -> link/target + + - name: Setup Flutter + uses: subosito/flutter-action@v2 + with: + channel: stable + + - name: Build local SDK test server binary + shell: bash + run: | + set -euo pipefail + cargo build --manifest-path backend/Cargo.toml --bin kalamdb-server + chmod +x backend/target/debug/kalamdb-server + + - name: Start SDK test server + shell: bash + run: | + set -euo pipefail + cp backend/server.example.toml server.toml + sed -i 's|data_path = "./data"|data_path = "./test-data"|g' server.toml + sed -i 's|logs_path = "./logs"|logs_path = "./test-data/logs"|g' server.toml + sed -i 's|jwt_secret = ".*"|jwt_secret = "sdk-test-secret-key-minimum-32-characters-long"|g' server.toml + mkdir -p test-data/rocksdb test-data/storage test-data/logs + ./backend/target/debug/kalamdb-server server.toml > dart-sdk-server.log 2>&1 & + SERVER_PID=$! + echo "SERVER_PID=$SERVER_PID" >> "$GITHUB_ENV" + for i in {1..120}; do + if curl -sf http://localhost:8080/health > /dev/null 2>&1 || curl -sf http://localhost:8080/v1/api/healthcheck > /dev/null 2>&1; then + echo "✅ SDK test server ready (${i}s)" + if [[ -s dart-sdk-server.log ]]; then + echo "Recent server log output:" + tail -n 40 dart-sdk-server.log || true + fi + exit 0 + fi + kill -0 "$SERVER_PID" 2>/dev/null || { echo "❌ Server died"; cat dart-sdk-server.log; exit 1; } + echo " Waiting... 
($i/120)" + sleep 1 + done + echo "❌ Timed out waiting for SDK test server" + cat dart-sdk-server.log || true + exit 1 + + - name: Run Dart SDK tests + id: run_tests + continue-on-error: true + shell: bash + working-directory: link/sdks/dart + env: + KALAMDB_URL: "http://localhost:8080" + KALAMDB_USER: "admin" + KALAMDB_PASSWORD: "kalamdb123" + KALAMDB_ROOT_PASSWORD: "kalamdb123" + DART_SDK_MODELS_MACHINE_OUTPUT: ${{ github.workspace }}/dart-sdk-models-machine.jsonl + DART_SDK_E2E_MACHINE_OUTPUT: ${{ github.workspace }}/dart-sdk-e2e-machine.jsonl + run: | + set -euo pipefail + bash ./test.sh + + - name: Parse Dart SDK test counts + if: always() + id: parse_badge + shell: bash + env: + STEP_OUTCOME: ${{ steps.run_tests.outcome }} + run: | + set -euo pipefail + python3 <<'PYTHON' + import json + import os + from pathlib import Path + + def iter_events(path: Path): + if not path.exists(): + return + for raw_line in path.read_text(encoding="utf-8", errors="replace").splitlines(): + line = raw_line.strip() + if not line: + continue + try: + parsed = json.loads(line) + except json.JSONDecodeError: + continue + events = parsed if isinstance(parsed, list) else [parsed] + for event in events: + if isinstance(event, dict): + yield event + + hidden = {} + total = 0 + passed = 0 + failed = 0 + + for path_str in ("dart-sdk-models-machine.jsonl", "dart-sdk-e2e-machine.jsonl"): + for event in iter_events(Path(path_str)): + event_type = event.get("type") + if event_type == "testStart": + test_info = event.get("test", {}) + test_id = test_info.get("id") + if test_id is not None: + hidden[test_id] = bool(test_info.get("hidden", False)) + elif event_type == "testDone": + test_id = event.get("testID") + if hidden.get(test_id, False): + continue + total += 1 + result = event.get("result") + if result == "success": + passed += 1 + elif result in {"failure", "error"}: + failed += 1 + + outcome = os.environ.get("STEP_OUTCOME", "failure") + if total == 0 and outcome == "success": + message = "passed" + color = "brightgreen" + elif total == 0: + message = "failed" + color = "red" + else: + message = f"{passed}/{total} passed" if failed == 0 and outcome == "success" else f"{passed}/{total} passed, {failed} failed" + color = "brightgreen" if failed == 0 and outcome == "success" else "red" + + with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as handle: + handle.write(f"total={total}\n") + handle.write(f"passed={passed}\n") + handle.write(f"failed={failed}\n") + handle.write(f"message={message}\n") + handle.write(f"color={color}\n") + PYTHON + + - name: Print Dart SDK server log + if: always() + shell: bash + run: | + echo "=== Dart SDK server log ===" + cat dart-sdk-server.log || true + + - name: Stop SDK test server + if: always() + shell: bash + run: | + [[ -n "${SERVER_PID:-}" ]] && kill "$SERVER_PID" 2>/dev/null || true + + - name: Upload server log + if: always() + uses: actions/upload-artifact@v6 + with: + name: dart-sdk-server-log + path: dart-sdk-server.log + if-no-files-found: ignore + + - name: Upload Dart SDK test output + if: always() + uses: actions/upload-artifact@v6 + with: + name: dart-sdk-test-output + path: | + dart-sdk-models-machine.jsonl + dart-sdk-e2e-machine.jsonl + if-no-files-found: ignore + + - name: Fail if Dart SDK tests failed + if: always() && steps.run_tests.outcome != 'success' + shell: bash + run: | + echo "Dart SDK tests failed" >&2 + exit 1 + + update_badge: + name: Update Dart SDK Badge + runs-on: ubuntu-latest + needs: test + if: ${{ always() && github.ref == 
'refs/heads/main' && needs.test.result != 'cancelled' }} + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Update Dart SDK badge file + shell: bash + run: | + set -euo pipefail + + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + + render_badge() { + mkdir -p .github/badges + + cat > .github/badges/sdk-dart-tests.json <&2 + exit 1 + + publish: + name: Publish Dart SDK to pub.dev + runs-on: ubuntu-latest + needs: test + if: ${{ inputs.publish }} + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Setup Flutter + uses: subosito/flutter-action@v2 + with: + channel: stable + + - name: Configure pub.dev credentials + shell: bash + env: + PUB_DEV_CREDENTIALS_JSON: ${{ secrets.PUB_DEV_CREDENTIALS_JSON }} + run: | + set -euo pipefail + if [[ -z "${PUB_DEV_CREDENTIALS_JSON}" ]]; then + echo "❌ Missing required secret: PUB_DEV_CREDENTIALS_JSON" + exit 1 + fi + + mkdir -p "$HOME/.pub-cache" + printf '%s' "$PUB_DEV_CREDENTIALS_JSON" > "$HOME/.pub-cache/credentials.json" + chmod 600 "$HOME/.pub-cache/credentials.json" + + - name: Publish Dart package + shell: bash + working-directory: link/sdks/dart + run: | + set -euo pipefail + chmod +x ./publish.sh + ./publish.sh \ No newline at end of file diff --git a/.github/workflows/orm.yml b/.github/workflows/orm.yml deleted file mode 100644 index ca176f321..000000000 --- a/.github/workflows/orm.yml +++ /dev/null @@ -1,94 +0,0 @@ -name: ORM - -on: - push: - branches: [main] - paths: - - 'link/sdks/typescript/orm/**' - - '.github/workflows/orm.yml' - pull_request: - paths: - - 'link/sdks/typescript/orm/**' - - '.github/workflows/orm.yml' - -jobs: - test: - name: Test - runs-on: ubuntu-latest - - services: - kalamdb: - image: jamals86/kalamdb:latest - ports: - - 8088:8080 - env: - KALAMDB_JWT_SECRET: "ci-test-secret-at-least-32-chars-long" - KALAMDB_ALLOW_REMOTE_SETUP: "true" - - env: - RUST_VERSION: "1.92.0" - WASM_PACK_VERSION: "0.14.0" - - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ env.RUST_VERSION }} - targets: wasm32-unknown-unknown - - - name: Cache Rust - uses: Swatinem/rust-cache@v2 - with: - shared-key: orm-client-build - cache-on-failure: true - - - name: Install wasm-pack - run: cargo install wasm-pack --version "$WASM_PACK_VERSION" --locked - - - name: Set up Node.js - uses: actions/setup-node@v4 - with: - node-version: "22" - - - name: Build @kalamdb/client from source - working-directory: link/sdks/typescript/client - run: npm install && npm run build - - - name: Wait for KalamDB - run: | - for i in $(seq 1 30); do - if curl -sf http://localhost:8088/v1/api/healthcheck 2>/dev/null || curl -sf -o /dev/null -w "%{http_code}" http://localhost:8088/v1/api/healthcheck 2>/dev/null | grep -q "403"; then - echo "KalamDB is ready" - break - fi - echo "Waiting for KalamDB... 
($i)" - sleep 2 - done - - - name: Setup KalamDB admin user - run: | - curl -fsS -X POST http://localhost:8088/v1/api/auth/setup \ - -H "Content-Type: application/json" \ - -d '{"username":"admin","password":"testpass123","root_password":"testpass123"}' - - - name: Install dependencies - working-directory: link/sdks/typescript/orm - run: npm install - - - name: Build - working-directory: link/sdks/typescript/orm - run: npm run build - - - name: Run unit tests - working-directory: link/sdks/typescript/orm - run: node --test tests/file-column.test.mjs tests/strip-defaults.test.mjs tests/temporal-normalize.test.mjs - - - name: Run integration tests - working-directory: link/sdks/typescript/orm - env: - KALAMDB_TEST_URL: http://localhost:8088 - KALAMDB_TEST_USER: admin - KALAMDB_TEST_PASSWORD: testpass123 - run: node --test tests/driver.test.mjs tests/generate.test.mjs tests/cli.test.mjs tests/live.test.mjs diff --git a/.github/workflows/python-sdk.yml b/.github/workflows/python-sdk.yml index 3104a2f00..59b1504e7 100644 --- a/.github/workflows/python-sdk.yml +++ b/.github/workflows/python-sdk.yml @@ -21,6 +21,13 @@ on: type: boolean required: false default: false + workflow_call: + inputs: + publish: + description: "Publish wheels to PyPI" + type: boolean + required: false + default: false env: CARGO_TERM_COLOR: always @@ -126,7 +133,7 @@ jobs: name: Publish to PyPI runs-on: ubuntu-latest needs: [build-wheels, build-sdist] - if: github.event_name == 'workflow_dispatch' && github.event.inputs.publish == 'true' + if: ${{ inputs.publish }} environment: pypi permissions: id-token: write diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 05c50e2a6..0ed643226 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -52,9 +52,56 @@ env: CARGO_BUILD_RUSTC_WRAPPER: "" jobs: + sync_versions_manifest: + name: Sync Versions Manifest + runs-on: ubuntu-latest + outputs: + commit_sha: ${{ steps.commit.outputs.commit_sha }} + changed: ${{ steps.commit.outputs.changed }} + + steps: + - name: Checkout + uses: actions/checkout@v6 + with: + fetch-depth: 0 + ref: ${{ github.ref_name }} + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Sync versions.json + shell: bash + run: | + set -euo pipefail + python3 scripts/versions.py sync --write + + - name: Commit versions manifest + id: commit + shell: bash + run: | + set -euo pipefail + + if git diff --quiet -- versions.json; then + echo "changed=false" >> "$GITHUB_OUTPUT" + echo "commit_sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" + exit 0 + fi + + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git add versions.json + git commit -m "chore: update versions manifest" + git push origin "HEAD:${GITHUB_REF_NAME}" + + echo "changed=true" >> "$GITHUB_OUTPUT" + echo "commit_sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" + read_version: - name: Read Version from Cargo.toml + name: Read Version Metadata runs-on: ubuntu-latest + needs: sync_versions_manifest outputs: version: ${{ steps.version.outputs.version }} tag: ${{ steps.version.outputs.tag }} @@ -63,26 +110,20 @@ jobs: steps: - name: Checkout uses: actions/checkout@v6 + with: + ref: ${{ needs.sync_versions_manifest.outputs.commit_sha }} + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' - name: Read version and set outputs id: version shell: bash run: | set -euo pipefail - VERSION="$(sed -n 
'/^\[workspace.package\]/,/^\[/{s/^version[[:space:]]*=[[:space:]]*"\([^"]*\)".*/\1/p}' Cargo.toml | head -n1)" - if [[ -z "$VERSION" ]]; then - echo "Failed to read [workspace.package].version from Cargo.toml" >&2 - exit 1 - fi - TAG="v${VERSION}" - PRE_RELEASE="${{ github.event.inputs.pre_release }}" - if [[ -z "$PRE_RELEASE" ]]; then - PRE_RELEASE="true" - fi - echo "version=$VERSION" >> "$GITHUB_OUTPUT" - echo "tag=$TAG" >> "$GITHUB_OUTPUT" - echo "pre_release=$PRE_RELEASE" >> "$GITHUB_OUTPUT" - echo "✅ Version: $VERSION, Tag: $TAG, Pre-release: $PRE_RELEASE" + python3 scripts/versions.py github-outputs --github-output "$GITHUB_OUTPUT" --repository "$GITHUB_REPOSITORY" license_compliance: name: License Compliance @@ -223,6 +264,14 @@ jobs: npm install --legacy-peer-deps npm run build + - name: Build React SDK (@kalamdb/react) + shell: bash + run: | + set -euo pipefail + cd link/sdks/typescript/react-old + npm install + npm run build + - name: Install Admin UI dependencies shell: bash run: | @@ -1427,6 +1476,7 @@ jobs: name: Publish GitHub Release runs-on: ubuntu-latest needs: + - sync_versions_manifest - license_compliance - build_cli_linux_x86_64 - build_cli_linux_aarch64 @@ -1448,6 +1498,8 @@ jobs: steps: - name: Checkout uses: actions/checkout@v6 + with: + ref: ${{ needs.sync_versions_manifest.outputs.commit_sha }} - name: Update README release metadata shell: bash @@ -1507,6 +1559,7 @@ jobs: uses: softprops/action-gh-release@v2 with: tag_name: ${{ needs.read_version.outputs.tag }} + target_commitish: ${{ needs.sync_versions_manifest.outputs.commit_sha }} prerelease: ${{ needs.read_version.outputs.pre_release == 'true' }} generate_release_notes: true files: | @@ -1516,6 +1569,7 @@ jobs: dist/${{ needs.read_version.outputs.version }}/LICENSE.txt dist/${{ needs.read_version.outputs.version }}/NOTICE dist/${{ needs.read_version.outputs.version }}/THIRD_PARTY_LICENSES.md + versions.json docker: name: Push Docker image (Docker Hub) diff --git a/.github/workflows/sdks.yml b/.github/workflows/sdks.yml deleted file mode 100644 index 394ceb433..000000000 --- a/.github/workflows/sdks.yml +++ /dev/null @@ -1,683 +0,0 @@ -name: SDKs - -on: - workflow_dispatch: - inputs: - npm_publish: - description: "Publish TypeScript SDK packages to npm" - type: boolean - required: false - default: false - force_npm_publish: - description: "Force republish (unpublish existing version first)" - type: boolean - required: false - default: false - dart_publish: - description: "Publish Dart SDK to pub.dev" - type: boolean - required: false - default: false - -permissions: - contents: write - -env: - CARGO_TERM_COLOR: always - RUST_VERSION: "1.92.0" - WASM_PACK_VERSION: "0.14.0" - RUSTC_WRAPPER: "" - CARGO_BUILD_RUSTC_WRAPPER: "" - -jobs: - resolve_versions: - name: Resolve SDK Versions - runs-on: ubuntu-latest - outputs: - root_version: ${{ steps.versions.outputs.root_version }} - ts_version: ${{ steps.versions.outputs.ts_version }} - dart_version: ${{ steps.versions.outputs.dart_version }} - sdk_version: ${{ steps.versions.outputs.sdk_version }} - release_tag: ${{ steps.versions.outputs.release_tag }} - release_server_asset_name: ${{ steps.versions.outputs.release_server_asset_name }} - release_server_asset_url: ${{ steps.versions.outputs.release_server_asset_url }} - steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Resolve versions and release asset - id: versions - shell: bash - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - set -euo pipefail - - ROOT_VERSION="$(sed -n 
'/^\[workspace.package\]/,/^\[/{s/^version[[:space:]]*=[[:space:]]*"\([^"]*\)".*/\1/p}' Cargo.toml | head -n1)" - if [[ -z "$ROOT_VERSION" ]]; then - echo "Failed to read [workspace.package].version from Cargo.toml" >&2 - exit 1 - fi - - SDK_VERSION="$ROOT_VERSION" - RELEASE_TAG="v${SDK_VERSION}" - RELEASE_SERVER_ASSET_NAME="kalamdb-server-${SDK_VERSION}-linux-x86_64.tar.gz" - RELEASE_SERVER_ASSET_URL="https://github.com/${GITHUB_REPOSITORY}/releases/download/${RELEASE_TAG}/${RELEASE_SERVER_ASSET_NAME}" - - # Use the GitHub Releases API to verify the asset exists. - # Direct HEAD probes against the download URL fail because GitHub - # redirects to a time-limited S3 pre-signed GET URL that rejects HEAD. - API_URL="https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/tags/${RELEASE_TAG}" - echo "Checking GitHub release: ${API_URL}" - RELEASE_JSON="$(curl -fsS \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" \ - -H "Accept: application/vnd.github+json" \ - "$API_URL" 2>&1)" || { - echo "GitHub release ${RELEASE_TAG} not found or API error." >&2 - echo "Publish the GitHub release assets first, then rerun the SDK workflow." >&2 - exit 1 - } - - if ! echo "$RELEASE_JSON" | grep -qF "\"${RELEASE_SERVER_ASSET_NAME}\""; then - echo "Missing released server binary for ${RELEASE_TAG}: ${RELEASE_SERVER_ASSET_NAME}" >&2 - echo "Publish the GitHub release assets first, then rerun the SDK workflow." >&2 - exit 1 - fi - - echo "root_version=$ROOT_VERSION" >> "$GITHUB_OUTPUT" - echo "ts_version=$SDK_VERSION" >> "$GITHUB_OUTPUT" - echo "dart_version=$SDK_VERSION" >> "$GITHUB_OUTPUT" - echo "sdk_version=$SDK_VERSION" >> "$GITHUB_OUTPUT" - echo "release_tag=$RELEASE_TAG" >> "$GITHUB_OUTPUT" - echo "release_server_asset_name=$RELEASE_SERVER_ASSET_NAME" >> "$GITHUB_OUTPUT" - echo "release_server_asset_url=$RELEASE_SERVER_ASSET_URL" >> "$GITHUB_OUTPUT" - - echo "Resolved workspace version: $ROOT_VERSION" - echo "Resolved SDK version: $SDK_VERSION (from Cargo.toml)" - echo "Using published release server binary: $RELEASE_SERVER_ASSET_NAME" - - download_release_server_binary: - name: Download Released SDK Server Binary - runs-on: ubuntu-latest - needs: resolve_versions - steps: - - name: Download release server asset - shell: bash - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - set -euo pipefail - - ASSET_NAME="${{ needs.resolve_versions.outputs.release_server_asset_name }}" - ASSET_URL="${{ needs.resolve_versions.outputs.release_server_asset_url }}" - mkdir -p release-server dist - - curl -fsSL \ - -H "Authorization: Bearer ${GITHUB_TOKEN}" \ - -H "Accept: application/octet-stream" \ - -o "dist/${ASSET_NAME}" \ - "$ASSET_URL" - - tar -xzf "dist/${ASSET_NAME}" -C release-server - chmod +x "release-server/kalamdb-server-${{ needs.resolve_versions.outputs.sdk_version }}-linux-x86_64" - mv "release-server/kalamdb-server-${{ needs.resolve_versions.outputs.sdk_version }}-linux-x86_64" release-server/kalamdb-server - ls -la release-server/ - - - name: Upload SDK test server binary - uses: actions/upload-artifact@v6 - with: - name: sdk-test-server-linux-x86_64 - path: release-server/kalamdb-server - if-no-files-found: error - - sdk_tests_typescript: - name: TypeScript SDK Tests - runs-on: ubuntu-latest - needs: - - resolve_versions - - download_release_server_binary - if: ${{ always() && needs.resolve_versions.result == 'success' }} - outputs: - total: ${{ steps.parse_typescript_badge.outputs.total }} - passed: ${{ steps.parse_typescript_badge.outputs.passed }} - failed: ${{ 
steps.parse_typescript_badge.outputs.failed }} - message: ${{ steps.parse_typescript_badge.outputs.message }} - color: ${{ steps.parse_typescript_badge.outputs.color }} - steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Download server binary - uses: actions/download-artifact@v6 - with: - name: sdk-test-server-linux-x86_64 - path: . - - - name: Install system dependencies - shell: bash - run: | - sudo apt-get update - sudo apt-get install -y --no-install-recommends \ - clang libclang-dev pkg-config libssl-dev - - - name: Setup Rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ env.RUST_VERSION }} - targets: wasm32-unknown-unknown - - - name: Cache Rust - uses: Swatinem/rust-cache@v2 - with: - shared-key: sdk-tests-typescript - cache-on-failure: true - - - name: Setup Node 22 - uses: actions/setup-node@v6 - with: - node-version: 22 - - - name: Install wasm-pack - shell: bash - run: | - set -euo pipefail - cargo install wasm-pack --version "$WASM_PACK_VERSION" --locked - wasm-pack --version - - - name: Prepare server binary - shell: bash - run: | - set -euo pipefail - chmod +x ./kalamdb-server - echo "SDK test source: release-binary" - - - name: Run TypeScript client, consumer, and ORM npm package tests - id: run_typescript_tests - continue-on-error: true - shell: bash - env: - KALAMDB_URL: "http://localhost:8080" - KALAMDB_USER: "admin" - KALAMDB_PASSWORD: "kalamdb123" - KALAMDB_ROOT_PASSWORD: "kalamdb123" - KALAMDB_SERVER_BIN: ${{ github.workspace }}/kalamdb-server - TS_SDK_SERVER_LOG: ${{ github.workspace }}/ts-sdk-server.log - TS_SDK_TEST_OUTPUT: ${{ github.workspace }}/ts-sdk-test-output.txt - run: | - set -euo pipefail - ./scripts/test-typescript-sdk-release.sh - - - name: Verify TypeScript npm package test coverage - if: always() - shell: bash - run: | - set -euo pipefail - output="ts-sdk-test-output.txt" - for package in client consumer orm; do - if ! 
grep -q "Running @kalamdb/${package} tests" "$output"; then - echo "Missing @kalamdb/${package} npm package test run in ${output}" >&2 - exit 1 - fi - done - - - name: Parse TypeScript SDK test counts - if: always() - id: parse_typescript_badge - shell: bash - env: - STEP_OUTCOME: ${{ steps.run_typescript_tests.outcome }} - run: | - set -euo pipefail - python3 <<'PYTHON' - import os - import re - from pathlib import Path - - output_path = Path("ts-sdk-test-output.txt") - text = output_path.read_text(encoding="utf-8", errors="replace") if output_path.exists() else "" - - total = sum(int(value) for value in re.findall(r"^(?:#|ℹ) tests (\d+)$", text, flags=re.MULTILINE)) - passed = sum(int(value) for value in re.findall(r"^(?:#|ℹ) pass (\d+)$", text, flags=re.MULTILINE)) - failed = sum(int(value) for value in re.findall(r"^(?:#|ℹ) fail (\d+)$", text, flags=re.MULTILINE)) - outcome = os.environ.get("STEP_OUTCOME", "failure") - - if total == 0 and outcome == "success": - message = "passed" - color = "brightgreen" - elif total == 0: - message = "failed" - color = "red" - else: - message = f"{passed}/{total} passed" if failed == 0 and outcome == "success" else f"{passed}/{total} passed, {failed} failed" - color = "brightgreen" if failed == 0 and outcome == "success" else "red" - - with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as handle: - handle.write(f"total={total}\n") - handle.write(f"passed={passed}\n") - handle.write(f"failed={failed}\n") - handle.write(f"message={message}\n") - handle.write(f"color={color}\n") - PYTHON - - - name: Print TypeScript SDK server log - if: always() - shell: bash - run: | - echo "=== TypeScript SDK server log ===" - cat ts-sdk-server.log || true - - - name: Upload server log - if: always() - uses: actions/upload-artifact@v6 - with: - name: ts-sdk-server-log - path: ts-sdk-server.log - if-no-files-found: ignore - - - name: Upload TypeScript SDK test output - if: always() - uses: actions/upload-artifact@v6 - with: - name: ts-sdk-test-output - path: ts-sdk-test-output.txt - if-no-files-found: ignore - - - name: Fail if TypeScript SDK tests failed - if: always() && steps.run_typescript_tests.outcome != 'success' - shell: bash - run: | - echo "TypeScript SDK tests failed" >&2 - exit 1 - - sdk_tests_dart: - name: Dart SDK Tests - runs-on: ubuntu-latest - needs: - - resolve_versions - - download_release_server_binary - if: ${{ always() && needs.resolve_versions.result == 'success' }} - outputs: - total: ${{ steps.parse_dart_badge.outputs.total }} - passed: ${{ steps.parse_dart_badge.outputs.passed }} - failed: ${{ steps.parse_dart_badge.outputs.failed }} - message: ${{ steps.parse_dart_badge.outputs.message }} - color: ${{ steps.parse_dart_badge.outputs.color }} - steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Download server binary - uses: actions/download-artifact@v6 - with: - name: sdk-test-server-linux-x86_64 - path: . 
- - - name: Install system dependencies - shell: bash - run: | - sudo apt-get update - sudo apt-get install -y --no-install-recommends \ - clang libclang-dev pkg-config libssl-dev - - - name: Setup Rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ env.RUST_VERSION }} - - - name: Cache Rust (dart bridge) - uses: Swatinem/rust-cache@v2 - with: - shared-key: dart-bridge - cache-on-failure: true - workspaces: link -> link/target - - - name: Setup Flutter - uses: subosito/flutter-action@v2 - with: - channel: stable - - - name: Prepare server binary - shell: bash - run: | - set -euo pipefail - chmod +x ./kalamdb-server - echo "SDK test source: release-binary" - - - name: Start SDK test server - shell: bash - run: | - set -euo pipefail - cp backend/server.example.toml server.toml - sed -i 's|data_path = "./data"|data_path = "./test-data"|g' server.toml - sed -i 's|logs_path = "./logs"|logs_path = "./test-data/logs"|g' server.toml - sed -i 's|jwt_secret = ".*"|jwt_secret = "sdk-test-secret-key-minimum-32-characters-long"|g' server.toml - mkdir -p test-data/rocksdb test-data/storage test-data/logs - ./kalamdb-server server.toml > dart-sdk-server.log 2>&1 & - SERVER_PID=$! - echo "SERVER_PID=$SERVER_PID" >> "$GITHUB_ENV" - for i in {1..60}; do - if curl -sf http://localhost:8080/health > /dev/null 2>&1 || curl -sf http://localhost:8080/v1/api/healthcheck > /dev/null 2>&1; then - echo "✅ SDK test server ready (${i}s)" - if [[ -s dart-sdk-server.log ]]; then - echo "Recent server log output:" - tail -n 40 dart-sdk-server.log || true - fi - exit 0 - fi - kill -0 "$SERVER_PID" 2>/dev/null || { echo "❌ Server died"; cat dart-sdk-server.log; exit 1; } - echo " Waiting... ($i/60)" - sleep 1 - done - echo "❌ Timed out waiting for SDK test server" - cat dart-sdk-server.log || true - exit 1 - - - name: Run Dart SDK tests - id: run_dart_tests - continue-on-error: true - shell: bash - working-directory: link/sdks/dart - env: - KALAMDB_URL: "http://localhost:8080" - KALAMDB_USER: "admin" - KALAMDB_PASSWORD: "kalamdb123" - KALAMDB_ROOT_PASSWORD: "kalamdb123" - DART_SDK_MODELS_MACHINE_OUTPUT: ${{ github.workspace }}/dart-sdk-models-machine.jsonl - DART_SDK_E2E_MACHINE_OUTPUT: ${{ github.workspace }}/dart-sdk-e2e-machine.jsonl - run: | - set -euo pipefail - bash ./test.sh - - - name: Parse Dart SDK test counts - if: always() - id: parse_dart_badge - shell: bash - env: - STEP_OUTCOME: ${{ steps.run_dart_tests.outcome }} - run: | - set -euo pipefail - python3 <<'PYTHON' - import json - import os - from pathlib import Path - - def iter_events(path: Path): - if not path.exists(): - return - for raw_line in path.read_text(encoding="utf-8", errors="replace").splitlines(): - line = raw_line.strip() - if not line: - continue - try: - parsed = json.loads(line) - except json.JSONDecodeError: - continue - events = parsed if isinstance(parsed, list) else [parsed] - for event in events: - if isinstance(event, dict): - yield event - - hidden = {} - total = 0 - passed = 0 - failed = 0 - - for path_str in ("dart-sdk-models-machine.jsonl", "dart-sdk-e2e-machine.jsonl"): - for event in iter_events(Path(path_str)): - event_type = event.get("type") - if event_type == "testStart": - test_info = event.get("test", {}) - test_id = test_info.get("id") - if test_id is not None: - hidden[test_id] = bool(test_info.get("hidden", False)) - elif event_type == "testDone": - test_id = event.get("testID") - if hidden.get(test_id, False): - continue - total += 1 - result = event.get("result") - if result == "success": - passed += 1 
- elif result in {"failure", "error"}: - failed += 1 - - outcome = os.environ.get("STEP_OUTCOME", "failure") - if total == 0 and outcome == "success": - message = "passed" - color = "brightgreen" - elif total == 0: - message = "failed" - color = "red" - else: - message = f"{passed}/{total} passed" if failed == 0 and outcome == "success" else f"{passed}/{total} passed, {failed} failed" - color = "brightgreen" if failed == 0 and outcome == "success" else "red" - - with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as handle: - handle.write(f"total={total}\n") - handle.write(f"passed={passed}\n") - handle.write(f"failed={failed}\n") - handle.write(f"message={message}\n") - handle.write(f"color={color}\n") - PYTHON - - - name: Print Dart SDK server log - if: always() - shell: bash - run: | - echo "=== Dart SDK server log ===" - cat dart-sdk-server.log || true - - - name: Stop SDK test server - if: always() - shell: bash - run: | - [[ -n "${SERVER_PID:-}" ]] && kill "$SERVER_PID" 2>/dev/null || true - - - name: Upload server log - if: always() - uses: actions/upload-artifact@v6 - with: - name: dart-sdk-server-log - path: dart-sdk-server.log - if-no-files-found: ignore - - - name: Upload Dart SDK test output - if: always() - uses: actions/upload-artifact@v6 - with: - name: dart-sdk-test-output - path: | - dart-sdk-models-machine.jsonl - dart-sdk-e2e-machine.jsonl - if-no-files-found: ignore - - - name: Fail if Dart SDK tests failed - if: always() && steps.run_dart_tests.outcome != 'success' - shell: bash - run: | - echo "Dart SDK tests failed" >&2 - exit 1 - - update_sdk_badges: - name: Update SDK Badges - runs-on: ubuntu-latest - needs: - - sdk_tests_typescript - - sdk_tests_dart - if: ${{ always() && github.ref == 'refs/heads/main' && needs.sdk_tests_typescript.result != 'cancelled' && needs.sdk_tests_dart.result != 'cancelled' }} - steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Update SDK badge files - shell: bash - run: | - set -euo pipefail - - git config user.name "github-actions[bot]" - git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - - render_badges() { - mkdir -p .github/badges - - cat > .github/badges/sdk-typescript-tests.json < .github/badges/sdk-dart-tests.json <&2 - exit 1 - - publish_npm: - name: Publish TypeScript SDK packages to npm - runs-on: ubuntu-latest - needs: - - resolve_versions - - sdk_tests_typescript - if: ${{ github.event.inputs.npm_publish == 'true' }} - steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Setup Node.js - uses: actions/setup-node@v6 - with: - node-version: '22' - registry-url: 'https://registry.npmjs.org' - - - name: Setup Rust - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ env.RUST_VERSION }} - targets: wasm32-unknown-unknown - - - name: Install wasm-pack - shell: bash - run: | - set -euo pipefail - cargo install wasm-pack --version "$WASM_PACK_VERSION" --locked - wasm-pack --version - - - name: Cache Rust - uses: Swatinem/rust-cache@v2 - with: - shared-key: npm-publish - cache-on-failure: true - - - name: Publish @kalamdb/client to npm - shell: bash - working-directory: link/sdks/typescript/client - env: - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} - run: | - set -euo pipefail - VERSION="${{ needs.resolve_versions.outputs.ts_version }}" - FORCE_FLAG="" - if [[ "${{ github.event.inputs.force_npm_publish }}" == "true" ]]; then - FORCE_FLAG="--force" - fi - chmod +x ./publish.sh - ./publish.sh --version "$VERSION" $FORCE_FLAG - - - name: Publish @kalamdb/consumer 
to npm - shell: bash - working-directory: link/sdks/typescript/consumer - env: - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} - run: | - set -euo pipefail - VERSION="${{ needs.resolve_versions.outputs.ts_version }}" - FORCE_FLAG="" - if [[ "${{ github.event.inputs.force_npm_publish }}" == "true" ]]; then - FORCE_FLAG="--force" - fi - chmod +x ./publish.sh - ./publish.sh --version "$VERSION" $FORCE_FLAG - - - name: Publish @kalamdb/orm to npm - shell: bash - working-directory: link/sdks/typescript/orm - env: - NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} - run: | - set -euo pipefail - VERSION="${{ needs.resolve_versions.outputs.ts_version }}" - FORCE_FLAG="" - if [[ "${{ github.event.inputs.force_npm_publish }}" == "true" ]]; then - FORCE_FLAG="--force" - fi - chmod +x ./publish.sh - ./publish.sh --version "$VERSION" $FORCE_FLAG - - publish_dart: - name: Publish Dart SDK to pub.dev - runs-on: ubuntu-latest - needs: - - sdk_tests_dart - if: ${{ github.event.inputs.dart_publish == 'true' }} - steps: - - name: Checkout - uses: actions/checkout@v6 - - - name: Setup Flutter - uses: subosito/flutter-action@v2 - with: - channel: stable - - - name: Configure pub.dev credentials - shell: bash - env: - PUB_DEV_CREDENTIALS_JSON: ${{ secrets.PUB_DEV_CREDENTIALS_JSON }} - run: | - set -euo pipefail - if [[ -z "${PUB_DEV_CREDENTIALS_JSON}" ]]; then - echo "❌ Missing required secret: PUB_DEV_CREDENTIALS_JSON" - exit 1 - fi - - mkdir -p "$HOME/.pub-cache" - printf '%s' "$PUB_DEV_CREDENTIALS_JSON" > "$HOME/.pub-cache/credentials.json" - chmod 600 "$HOME/.pub-cache/credentials.json" - - - name: Publish Dart package - shell: bash - working-directory: link/sdks/dart - run: | - set -euo pipefail - chmod +x ./publish.sh - ./publish.sh diff --git a/.github/workflows/typescript-sdk.yml b/.github/workflows/typescript-sdk.yml new file mode 100644 index 000000000..519c1f14b --- /dev/null +++ b/.github/workflows/typescript-sdk.yml @@ -0,0 +1,373 @@ +name: TypeScript SDK + +on: + push: + branches: [main, dev] + paths: + - 'link/sdks/typescript/**' + - 'link/kalam-client/**' + - 'link/kalam-consumer-wasm/**' + - 'link/link-common/**' + - 'backend/server.example.toml' + - 'scripts/test-typescript-sdk-release.sh' + - 'versions.json' + - 'scripts/versions.py' + - '.github/workflows/typescript-sdk.yml' + pull_request: + paths: + - 'link/sdks/typescript/**' + - 'link/kalam-client/**' + - 'link/kalam-consumer-wasm/**' + - 'link/link-common/**' + - 'backend/server.example.toml' + - 'scripts/test-typescript-sdk-release.sh' + - 'versions.json' + - 'scripts/versions.py' + - '.github/workflows/typescript-sdk.yml' + workflow_dispatch: + inputs: + publish: + description: "Publish TypeScript SDK packages to npm" + type: boolean + required: false + default: false + force_publish: + description: "Force republish (unpublish existing version first)" + type: boolean + required: false + default: false + workflow_call: + inputs: + publish: + description: "Publish TypeScript SDK packages to npm" + type: boolean + required: false + default: false + force_publish: + description: "Force republish (unpublish existing version first)" + type: boolean + required: false + default: false + +permissions: + contents: write + +env: + CARGO_TERM_COLOR: always + RUST_VERSION: "1.92.0" + WASM_PACK_VERSION: "0.14.0" + RUSTC_WRAPPER: "" + CARGO_BUILD_RUSTC_WRAPPER: "" + +jobs: + resolve_versions: + name: Resolve TypeScript SDK Version + runs-on: ubuntu-latest + outputs: + typescript_version: ${{ steps.versions.outputs.typescript_version }} + steps: + - name: 
Checkout + uses: actions/checkout@v6 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Verify versions.json + shell: bash + run: | + set -euo pipefail + python3 scripts/versions.py verify + + - name: Resolve versions from versions.json + id: versions + shell: bash + run: | + set -euo pipefail + python3 scripts/versions.py github-outputs --github-output "$GITHUB_OUTPUT" --repository "$GITHUB_REPOSITORY" + + test: + name: TypeScript SDK Tests + runs-on: ubuntu-latest + needs: resolve_versions + outputs: + total: ${{ steps.parse_badge.outputs.total }} + passed: ${{ steps.parse_badge.outputs.passed }} + failed: ${{ steps.parse_badge.outputs.failed }} + message: ${{ steps.parse_badge.outputs.message }} + color: ${{ steps.parse_badge.outputs.color }} + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Install system dependencies + shell: bash + run: | + sudo apt-get update + sudo apt-get install -y --no-install-recommends \ + clang libclang-dev pkg-config libssl-dev + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + targets: wasm32-unknown-unknown + + - name: Cache Rust + uses: Swatinem/rust-cache@v2 + with: + shared-key: sdk-tests-typescript + cache-on-failure: true + + - name: Setup Node 22 + uses: actions/setup-node@v6 + with: + node-version: 22 + + - name: Install wasm-pack + shell: bash + run: | + set -euo pipefail + cargo install wasm-pack --version "$WASM_PACK_VERSION" --locked + wasm-pack --version + + - name: Build local SDK test server binary + shell: bash + run: | + set -euo pipefail + cargo build --manifest-path backend/Cargo.toml --bin kalamdb-server + chmod +x backend/target/debug/kalamdb-server + + - name: Run TypeScript client, consumer, and ORM npm package tests + id: run_tests + continue-on-error: true + shell: bash + env: + KALAMDB_URL: "http://localhost:8080" + KALAMDB_USER: "admin" + KALAMDB_PASSWORD: "kalamdb123" + KALAMDB_ROOT_PASSWORD: "kalamdb123" + KALAMDB_SERVER_BIN: ${{ github.workspace }}/backend/target/debug/kalamdb-server + TS_SDK_SERVER_LOG: ${{ github.workspace }}/ts-sdk-server.log + TS_SDK_TEST_OUTPUT: ${{ github.workspace }}/ts-sdk-test-output.txt + run: | + set -euo pipefail + ./scripts/test-typescript-sdk-release.sh + + - name: Verify TypeScript npm package test coverage + if: always() + shell: bash + run: | + set -euo pipefail + output="ts-sdk-test-output.txt" + for package in client consumer orm; do + if ! 
grep -q "Running @kalamdb/${package} tests" "$output"; then + echo "Missing @kalamdb/${package} npm package test run in ${output}" >&2 + exit 1 + fi + done + + - name: Parse TypeScript SDK test counts + if: always() + id: parse_badge + shell: bash + env: + STEP_OUTCOME: ${{ steps.run_tests.outcome }} + run: | + set -euo pipefail + python3 <<'PYTHON' + import os + import re + from pathlib import Path + + output_path = Path("ts-sdk-test-output.txt") + text = output_path.read_text(encoding="utf-8", errors="replace") if output_path.exists() else "" + + total = sum(int(value) for value in re.findall(r"^(?:#|ℹ) tests (\d+)$", text, flags=re.MULTILINE)) + passed = sum(int(value) for value in re.findall(r"^(?:#|ℹ) pass (\d+)$", text, flags=re.MULTILINE)) + failed = sum(int(value) for value in re.findall(r"^(?:#|ℹ) fail (\d+)$", text, flags=re.MULTILINE)) + outcome = os.environ.get("STEP_OUTCOME", "failure") + + if total == 0 and outcome == "success": + message = "passed" + color = "brightgreen" + elif total == 0: + message = "failed" + color = "red" + else: + message = f"{passed}/{total} passed" if failed == 0 and outcome == "success" else f"{passed}/{total} passed, {failed} failed" + color = "brightgreen" if failed == 0 and outcome == "success" else "red" + + with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as handle: + handle.write(f"total={total}\n") + handle.write(f"passed={passed}\n") + handle.write(f"failed={failed}\n") + handle.write(f"message={message}\n") + handle.write(f"color={color}\n") + PYTHON + + - name: Print TypeScript SDK server log + if: always() + shell: bash + run: | + echo "=== TypeScript SDK server log ===" + cat ts-sdk-server.log || true + + - name: Upload server log + if: always() + uses: actions/upload-artifact@v6 + with: + name: ts-sdk-server-log + path: ts-sdk-server.log + if-no-files-found: ignore + + - name: Upload TypeScript SDK test output + if: always() + uses: actions/upload-artifact@v6 + with: + name: ts-sdk-test-output + path: ts-sdk-test-output.txt + if-no-files-found: ignore + + - name: Fail if TypeScript SDK tests failed + if: always() && steps.run_tests.outcome != 'success' + shell: bash + run: | + echo "TypeScript SDK tests failed" >&2 + exit 1 + + update_badge: + name: Update TypeScript SDK Badge + runs-on: ubuntu-latest + needs: test + if: ${{ always() && github.ref == 'refs/heads/main' && needs.test.result != 'cancelled' }} + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Update TypeScript SDK badge file + shell: bash + run: | + set -euo pipefail + + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + + render_badge() { + mkdir -p .github/badges + + cat > .github/badges/sdk-typescript-tests.json <&2 + exit 1 + + publish: + name: Publish TypeScript SDK packages to npm + runs-on: ubuntu-latest + needs: + - resolve_versions + - test + if: ${{ inputs.publish }} + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Setup Node.js + uses: actions/setup-node@v6 + with: + node-version: '22' + registry-url: 'https://registry.npmjs.org' + + - name: Setup Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ env.RUST_VERSION }} + targets: wasm32-unknown-unknown + + - name: Install wasm-pack + shell: bash + run: | + set -euo pipefail + cargo install wasm-pack --version "$WASM_PACK_VERSION" --locked + wasm-pack --version + + - name: Cache Rust + uses: Swatinem/rust-cache@v2 + with: + shared-key: npm-publish + cache-on-failure: true + + - 
name: Publish @kalamdb/client to npm + shell: bash + working-directory: link/sdks/typescript/client + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + run: | + set -euo pipefail + VERSION="${{ needs.resolve_versions.outputs.typescript_version }}" + FORCE_FLAG="" + if [[ "${{ inputs.force_publish }}" == "true" ]]; then + FORCE_FLAG="--force" + fi + chmod +x ./publish.sh + ./publish.sh --version "$VERSION" $FORCE_FLAG + + - name: Publish @kalamdb/consumer to npm + shell: bash + working-directory: link/sdks/typescript/consumer + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + run: | + set -euo pipefail + VERSION="${{ needs.resolve_versions.outputs.typescript_version }}" + FORCE_FLAG="" + if [[ "${{ inputs.force_publish }}" == "true" ]]; then + FORCE_FLAG="--force" + fi + chmod +x ./publish.sh + ./publish.sh --version "$VERSION" $FORCE_FLAG + + - name: Publish @kalamdb/orm to npm + shell: bash + working-directory: link/sdks/typescript/orm + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + run: | + set -euo pipefail + VERSION="${{ needs.resolve_versions.outputs.typescript_version }}" + FORCE_FLAG="" + if [[ "${{ inputs.force_publish }}" == "true" ]]; then + FORCE_FLAG="--force" + fi + chmod +x ./publish.sh + ./publish.sh --version "$VERSION" $FORCE_FLAG \ No newline at end of file diff --git a/.github/workflows/versions.yml b/.github/workflows/versions.yml new file mode 100644 index 000000000..d9538818d --- /dev/null +++ b/.github/workflows/versions.yml @@ -0,0 +1,64 @@ +name: Version Drift + +on: + push: + branches: [main] + paths: + - 'Cargo.toml' + - 'backend/Cargo.toml' + - 'cli/Cargo.toml' + - 'pg/Cargo.toml' + - 'link/kalam-client/Cargo.toml' + - 'link/sdks/typescript/**/package.json' + - 'link/sdks/dart/pubspec.yaml' + - 'link/sdks/python/pyproject.toml' + - 'link/sdks/python/Cargo.toml' + - 'versions.json' + - 'scripts/versions.py' + - '.github/workflows/versions.yml' + - '.github/workflows/release.yml' + - '.github/workflows/typescript-sdk.yml' + - '.github/workflows/dart-sdk.yml' + - '.github/workflows/python-sdk.yml' + pull_request: + paths: + - 'Cargo.toml' + - 'backend/Cargo.toml' + - 'cli/Cargo.toml' + - 'pg/Cargo.toml' + - 'link/kalam-client/Cargo.toml' + - 'link/sdks/typescript/**/package.json' + - 'link/sdks/dart/pubspec.yaml' + - 'link/sdks/python/pyproject.toml' + - 'link/sdks/python/Cargo.toml' + - 'versions.json' + - 'scripts/versions.py' + - '.github/workflows/versions.yml' + - '.github/workflows/release.yml' + - '.github/workflows/typescript-sdk.yml' + - '.github/workflows/dart-sdk.yml' + - '.github/workflows/python-sdk.yml' + workflow_dispatch: + +permissions: + contents: read + +jobs: + verify: + name: Verify versions.json + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v6 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Verify versions manifest + shell: bash + run: | + set -euo pipefail + python3 scripts/versions.py verify \ No newline at end of file diff --git a/.specify/memory/constitution.md b/.specify/memory/constitution.md index a4670ff46..4983c7993 100644 --- a/.specify/memory/constitution.md +++ b/.specify/memory/constitution.md @@ -1,50 +1,55 @@ -# [PROJECT_NAME] Constitution - +# KalamDB Speckit Constitution ## Core Principles -### [PRINCIPLE_1_NAME] - -[PRINCIPLE_1_DESCRIPTION] - +### I. 
Performance-First Execution -### [PRINCIPLE_2_NAME] - -[PRINCIPLE_2_DESCRIPTION] - +- Features, plans, and tasks MUST prefer lower runtime cost, lower allocation pressure, smaller dependency surface, and faster build feedback when tradeoffs are otherwise comparable. +- Changes in hot paths MUST avoid extra SQL rewrite passes, duplicate orchestration layers, or framework-specific work in shared core packages unless a measured benefit justifies the added complexity. +- Performance-oriented benchmarks and perf e2e runs MUST record per-test runtime in seconds. -### [PRINCIPLE_3_NAME] - -[PRINCIPLE_3_DESCRIPTION] - +### II. Boundary Ownership Before Convenience -### [PRINCIPLE_4_NAME] - -[PRINCIPLE_4_DESCRIPTION] - +- Work MUST respect package and crate ownership boundaries: shared live-query behavior belongs in framework-agnostic client layers, React-only concerns belong in the React SDK package, filesystem logic belongs in `kalamdb-filestore`, and key-value engine logic belongs in `kalamdb-store`. +- Orchestration layers MUST delegate to the owning crate or package instead of embedding lower-level storage or framework-specific details directly. +- Public contracts SHOULD use typed models and reusable shared abstractions instead of duplicating parallel representations. -### [PRINCIPLE_5_NAME] - -[PRINCIPLE_5_DESCRIPTION] - +### III. Minimal Dependency Expansion -## [SECTION_2_NAME] - +- New dependencies MUST use the smallest viable feature set and SHOULD be added only in the package that directly needs them. +- Shared packages MUST remain free of UI-framework dependencies unless the shared package itself is the framework binding. +- Plans and tasks SHOULD favor reuse of existing KalamDB packages and tooling before introducing new libraries or parallel implementations. -[SECTION_2_CONTENT] - +### IV. Validation, Testing, and Documentation Ship Together -## [SECTION_3_NAME] - +- Every feature plan MUST define focused executable validation for the affected surface before implementation begins. +- SDK changes under `link/sdks/**` MUST include test coverage and MUST update both repo-side docs and the corresponding KalamSite SDK docs. +- Tasks for each user story MUST preserve an independently testable slice so implementation can be validated incrementally. -[SECTION_3_CONTENT] - +### V. Composable, Low-Boilerplate APIs + +- Shared behavior intended for more than one UI framework MUST be defined in a framework-agnostic layer before framework wrappers are added. +- React-facing APIs SHOULD prefer hook-first composition with thin wrapper components instead of forcing nested render-prop or mirror-state patterns for advanced screens. +- Derived screen state SHOULD remain a pure projection over authoritative live state rather than becoming a second client-side source of truth. + +## Architecture and Delivery Constraints + +- Architecture-affecting work MUST update the relevant design artifacts so specs, plans, tasks, and public contracts stay aligned with the intended implementation boundaries. +- Generated directories and generated SDK outputs MUST not be edited manually. +- External documentation paths that are out of workspace scope may be referenced in plans and tasks, but implementation work MUST call out when those updates cannot be validated locally. + +## Workflow and Quality Gates + +- Every plan MUST include a constitution check that maps the feature to these principles before implementation starts. 
+- When the constitution changes, active feature plans and tasks that rely on it MUST be reviewed and updated in the same change where practical. +- Complexity exceptions MUST be made explicit in the relevant plan instead of being implied by implementation details. +- Validation gates SHOULD use the narrowest executable check that can falsify the intended behavior before broader workspace checks are run. ## Governance - -[GOVERNANCE_RULES] - +- This constitution supersedes conflicting guidance in feature specs, plans, and task lists. +- `AGENTS.md` and `.github/copilot-instructions.md` provide operational guidance for day-to-day work, but they do not weaken the principles in this constitution. +- Amendments require updating this file, documenting the reason in the associated change, and realigning affected active planning artifacts when needed. +- Compliance with this constitution MUST be checked before `/speckit.implement` begins. -**Version**: [CONSTITUTION_VERSION] | **Ratified**: [RATIFICATION_DATE] | **Last Amended**: [LAST_AMENDED_DATE] - +**Version**: 1.0.0 | **Ratified**: 2026-05-07 | **Last Amended**: 2026-05-07 diff --git a/AGENTS.md b/AGENTS.md index cab76a50d..b35ee4148 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -52,6 +52,14 @@ use kalamdb_commons::models::UserId; - Only edit the version in root `Cargo.toml` - All crates will automatically use the new version +## SDK Versioning Rules + +- Root Rust release components share the root workspace version from `Cargo.toml`: server, CLI, PG extension, and the current `link/kalam-client` crate. +- Each non-Rust SDK keeps its own package version source, but all packages under `link/sdks/typescript/**` move as one cohort and must use the same package version. +- Internal TypeScript compatibility ranges for sibling KalamDB packages must use a prerelease-safe bounded floor. Do not use `>=X.Y.Z` while the shared SDK cohort is still on prereleases, because npm semver excludes versions like `X.Y.Z-beta.1` from that range. Use a bounded floor like `>=X.Y.Z-0` with an explicit upper limit, for example `>=0.5.0-0 <0.6.0` (see the sketch below). +- For local TypeScript SDK installs and tests before publish, keep sibling KalamDB packages available via `file:` devDependencies. A peerDependency alone is not enough when the depended-on package version has not been published yet. +- When SDK versions or internal SDK dependency ranges change, update `versions.json` and validate with `python3 scripts/versions.py verify`.
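To make the range rule concrete, here is a minimal sketch of a sibling TypeScript SDK manifest that follows both bullets. It is illustrative only and not taken from the repo's actual manifests: the package names, the `0.5.0-beta.1` cohort version, and the `file:../client` relative path are assumptions.

```bash
# Minimal sketch (illustrative; run from a hypothetical link/sdks/typescript/orm checkout).
# peerDependencies uses the prerelease-inclusive bounded floor, so 0.5.0-beta.1 still
# satisfies the range; the file: devDependency keeps the unpublished sibling installable
# for local builds and tests before anything is published to npm.
cat > package.json.example <<'JSON'
{
  "name": "@kalamdb/orm",
  "version": "0.5.0-beta.1",
  "peerDependencies": {
    "@kalamdb/client": ">=0.5.0-0 <0.6.0"
  },
  "devDependencies": {
    "@kalamdb/client": "file:../client"
  }
}
JSON
```

With a plain `>=0.5.0` floor, npm semver would exclude `0.5.0-beta.1`; the `-0` prerelease floor admits the shared cohort's prereleases while `<0.6.0` still caps the range.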
+ ## Active Technologies - Rust 1.92+ (stable toolchain, edition 2021) - RocksDB 0.24, Apache Arrow 52.0, Apache Parquet 52.0, DataFusion 40.0, Actix-Web 4.4 diff --git a/Cargo.lock b/Cargo.lock index d174b2947..8f83e30d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -911,9 +911,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bcrypt" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523ab528ce3a7ada6597f8ccf5bd8d85ebe26d5edf311cad4d1d3cfb2d357ac6" +checksum = "24ae5479c93d3720e4c1dbd6b945b97457c50cb672781104768190371df1a905" dependencies = [ "base64", "blowfish", @@ -998,9 +998,9 @@ dependencies = [ [[package]] name = "blowfish" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" +checksum = "62ce3946557b35e71d1bbe07ec385073ce9eda05043f95de134eb578fcf1a298" dependencies = [ "byteorder", "cipher", @@ -1273,11 +1273,11 @@ dependencies = [ [[package]] name = "cipher" -version = "0.4.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +checksum = "e34d8227fe1ba289043aeb13792056ff80fd6de1a9f49137a5f499de8e8c78ea" dependencies = [ - "crypto-common 0.1.7", + "crypto-common 0.2.1", "inout", ] @@ -3671,11 +3671,11 @@ dependencies = [ [[package]] name = "inout" -version = "0.1.4" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7" dependencies = [ - "generic-array", + "hybrid-array", ] [[package]] @@ -3805,9 +3805,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.97" +version = "0.3.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1840c94c045fbcf8ba2812c95db44499f7c64910a912551aaaa541decebcacf" +checksum = "67df7112613f8bfd9150013a0314e196f4800d3201ae742489d999db2f979f08" dependencies = [ "cfg-if", "futures-util", @@ -3832,7 +3832,7 @@ dependencies = [ [[package]] name = "kalam-cli" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "anyhow", "assert_cmd", @@ -3874,7 +3874,7 @@ dependencies = [ [[package]] name = "kalam-client" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "kalamdb-configs", "kalamdb-server", @@ -3889,7 +3889,7 @@ dependencies = [ [[package]] name = "kalam-consumer-wasm" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "base64", "js-sys", @@ -3903,7 +3903,7 @@ dependencies = [ [[package]] name = "kalam-link-dart" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "anyhow", "flutter_rust_bridge", @@ -3915,7 +3915,7 @@ dependencies = [ [[package]] name = "kalam-pg-api" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "async-trait", @@ -3927,7 +3927,7 @@ dependencies = [ [[package]] name = "kalam-pg-client" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "arrow-ipc", @@ -3947,7 +3947,7 @@ dependencies = [ [[package]] name = "kalam-pg-common" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "datafusion-common", "serde", @@ -3956,7 +3956,7 @@ dependencies = [ [[package]] name = "kalam-pg-extension" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" 
dependencies = [ "arrow", "async-trait", @@ -3987,7 +3987,7 @@ dependencies = [ [[package]] name = "kalam-pg-fdw" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "datafusion-common", "kalam-pg-api", @@ -3998,7 +3998,7 @@ dependencies = [ [[package]] name = "kalam-pg-types" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "kalam-pg-common", "kalamdb-commons", @@ -4006,7 +4006,7 @@ dependencies = [ [[package]] name = "kalamdb-api" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "actix-files", "actix-multipart", @@ -4049,7 +4049,7 @@ dependencies = [ [[package]] name = "kalamdb-auth" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "actix-web", "anyhow", @@ -4079,7 +4079,7 @@ dependencies = [ [[package]] name = "kalamdb-commons" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "arrow-schema", @@ -4107,7 +4107,7 @@ dependencies = [ [[package]] name = "kalamdb-configs" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "anyhow", "ipnet", @@ -4118,7 +4118,7 @@ dependencies = [ [[package]] name = "kalamdb-core" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "anyhow", "arrow", @@ -4174,7 +4174,7 @@ dependencies = [ [[package]] name = "kalamdb-datafusion-sources" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "arrow-schema", @@ -4192,7 +4192,7 @@ dependencies = [ [[package]] name = "kalamdb-dba" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "chrono", "datafusion", @@ -4210,7 +4210,7 @@ dependencies = [ [[package]] name = "kalamdb-dialect" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "anyhow", "arrow", @@ -4228,7 +4228,7 @@ dependencies = [ [[package]] name = "kalamdb-filestore" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "bytes", @@ -4259,7 +4259,7 @@ dependencies = [ [[package]] name = "kalamdb-handlers" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "kalamdb-commons", "kalamdb-core", @@ -4274,7 +4274,7 @@ dependencies = [ [[package]] name = "kalamdb-handlers-admin" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "chrono", @@ -4294,7 +4294,7 @@ dependencies = [ [[package]] name = "kalamdb-handlers-ddl" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "chrono", @@ -4318,7 +4318,7 @@ dependencies = [ [[package]] name = "kalamdb-handlers-stream" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "chrono", "datafusion", @@ -4335,7 +4335,7 @@ dependencies = [ [[package]] name = "kalamdb-handlers-support" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "chrono", "datafusion", @@ -4352,7 +4352,7 @@ dependencies = [ [[package]] name = "kalamdb-handlers-user" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "chrono", "kalamdb-auth", @@ -4368,7 +4368,7 @@ dependencies = [ [[package]] name = "kalamdb-jobs" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "async-trait", "chrono", @@ -4396,7 +4396,7 @@ dependencies = [ [[package]] name = "kalamdb-live" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "async-trait", @@ -4424,7 +4424,7 @@ dependencies = [ [[package]] name = "kalamdb-macros" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "proc-macro2", "quote", @@ -4433,7 +4433,7 @@ dependencies = [ [[package]] name = "kalamdb-observability" -version = "0.4.3-rc.4" +version = 
"0.5.0-beta.1" dependencies = [ "cc", "chrono", @@ -4446,7 +4446,7 @@ dependencies = [ [[package]] name = "kalamdb-pg" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "arrow-ipc", @@ -4473,7 +4473,7 @@ dependencies = [ [[package]] name = "kalamdb-plan-cache" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "datafusion", "kalamdb-commons", @@ -4482,7 +4482,7 @@ dependencies = [ [[package]] name = "kalamdb-publisher" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "chrono", "dashmap 6.1.0", @@ -4499,7 +4499,7 @@ dependencies = [ [[package]] name = "kalamdb-raft" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "async-trait", "chrono", @@ -4535,7 +4535,7 @@ dependencies = [ [[package]] name = "kalamdb-server" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "actix-cors", "actix-web", @@ -4595,7 +4595,7 @@ dependencies = [ [[package]] name = "kalamdb-server-auth" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "log", "rcgen", @@ -4605,7 +4605,7 @@ dependencies = [ [[package]] name = "kalamdb-session" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "kalamdb-commons", "tokio", @@ -4613,7 +4613,7 @@ dependencies = [ [[package]] name = "kalamdb-session-datafusion" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "async-trait", @@ -4626,7 +4626,7 @@ dependencies = [ [[package]] name = "kalamdb-sharding" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "kalamdb-commons", "kalamdb-configs", @@ -4635,7 +4635,7 @@ dependencies = [ [[package]] name = "kalamdb-store" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "anyhow", "async-trait", @@ -4657,7 +4657,7 @@ dependencies = [ [[package]] name = "kalamdb-streams" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "chrono", "dashmap 6.1.0", @@ -4671,7 +4671,7 @@ dependencies = [ [[package]] name = "kalamdb-system" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "async-trait", @@ -4696,7 +4696,7 @@ dependencies = [ [[package]] name = "kalamdb-tables" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "async-trait", @@ -4729,7 +4729,7 @@ dependencies = [ [[package]] name = "kalamdb-transactions" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "datafusion", "futures-util", @@ -4741,7 +4741,7 @@ dependencies = [ [[package]] name = "kalamdb-vector" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "async-trait", "bytes", @@ -4763,7 +4763,7 @@ dependencies = [ [[package]] name = "kalamdb-views" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "arrow", "async-trait", @@ -4949,7 +4949,7 @@ dependencies = [ [[package]] name = "link-common" -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" dependencies = [ "aws-lc-rs", "base64", @@ -7868,9 +7868,9 @@ checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db" [[package]] name = "tonic" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" +checksum = "ac2a5518c70fa84342385732db33fb3f44bc4cc748936eb5833d2df34d6445ef" dependencies = [ "async-trait", "axum", @@ -7899,9 +7899,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" +checksum = "50849f68853be452acf590cde0b146665b8d507b3b8af17261df47e02c209ea0" dependencies = [ "bytes", "prost", @@ -8356,9 +8356,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.120" +version = "0.2.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df52b6d9b87e0c74c9edfa1eb2d9bf85e5d63515474513aa50fa181b3c4f5db1" +checksum = "49ace1d07c165b0864824eee619580c4689389afa9dc9ed3a4c75040d82e6790" dependencies = [ "cfg-if", "once_cell", @@ -8369,9 +8369,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.70" +version = "0.4.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af934872acec734c2d80e6617bbb5ff4f12b052dd8e6332b0817bce889516084" +checksum = "96492d0d3ffba25305a7dc88720d250b1401d7edca02cc3bcd50633b424673b8" dependencies = [ "js-sys", "wasm-bindgen", @@ -8379,9 +8379,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.120" +version = "0.2.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b1041f495fb322e64aca85f5756b2172e35cd459376e67f2a6c9dffcedb103" +checksum = "8e68e6f4afd367a562002c05637acb8578ff2dea1943df76afb9e83d177c8578" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8389,9 +8389,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.120" +version = "0.2.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dcd0ff20416988a18ac686d4d4d0f6aae9ebf08a389ff5d29012b05af2a1b41" +checksum = "d95a9ec35c64b2a7cb35d3fead40c4238d0940c86d107136999567a4703259f2" dependencies = [ "bumpalo", "proc-macro2", @@ -8402,9 +8402,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.120" +version = "0.2.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49757b3c82ebf16c57d69365a142940b384176c24df52a087fb748e2085359ea" +checksum = "c4e0100b01e9f0d03189a92b96772a1fb998639d981193d7dbab487302513441" dependencies = [ "unicode-ident", ] @@ -8471,9 +8471,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.97" +version = "0.3.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eadbac71025cd7b0834f20d1fe8472e8495821b4e9801eb0a60bd1f19827602" +checksum = "4b572dff8bcf38bad0fa19729c89bb5748b2b9b1d8be70cf90df697e3a8f32aa" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 0f0671834..c4a34bbff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ members = [ exclude = ["benchv2"] [workspace.package] -version = "0.4.3-rc.4" +version = "0.5.0-beta.1" edition = "2021" rust-version = "1.92" authors = ["KalamDB Team"] @@ -161,14 +161,14 @@ rmp = "0.8" rmp-serde = "1.3" # gRPC for Raft network layer -tonic = { version = "0.14.5" } -tonic-prost = "0.14.5" +tonic = { version = "0.14.6" } +tonic-prost = "0.14.6" tonic-build = "0.14.5" prost = "0.14.3" prost-types = "0.14.3" # Additional dependencies -bcrypt = "0.19.0" +bcrypt = "0.19.1" rand = "0.10.1" rcgen = "0.14.7" x509-parser = "0.18.1" @@ -218,12 +218,12 @@ storekey = "0.11" moka = { version = "0.12.15", features = ["future", "sync"] } ntest = "0.9.5" ipnet = "2.11.0" -wasm-bindgen = { version = "0.2.120" } -wasm-bindgen-futures = { version = "0.4.70" } -js-sys = { version = "0.3.97" } +wasm-bindgen = { version = "0.2.121" } +wasm-bindgen-futures = { version = "0.4.71" } +js-sys = { version = "0.3.98" } libc = 
"0.2.186" libmimalloc-sys = { version = "0.1.47", features = ["extended"] } -web-sys = { version = "0.3.97" } +web-sys = { version = "0.3.98" } tsify = { version = "0.5.6", default-features = false, features = ["js"] } serde-wasm-bindgen = "0.6.5" flate2 = "1.1.9" diff --git a/README.md b/README.md index b60fe40eb..878657783 100644 --- a/README.md +++ b/README.md @@ -233,3 +233,7 @@ CREATE TABLE chat.messages ( - Website: KalamDB is under active development and evolving quickly. + +## License + +Licensed under the Apache License, Version 2.0 (`Apache-2.0`). See [LICENSE.txt](LICENSE.txt) and [NOTICE](NOTICE). diff --git a/backend/README.md b/backend/README.md index b111a9454..104ad35f4 100644 --- a/backend/README.md +++ b/backend/README.md @@ -297,4 +297,4 @@ For details, see [KNOWN_ISSUES.md](./KNOWN_ISSUES.md). ## License -MIT OR Apache-2.0 +Licensed under the Apache License, Version 2.0 (`Apache-2.0`). See [../LICENSE.txt](../LICENSE.txt) and [../NOTICE](../NOTICE). diff --git a/backend/crates/kalamdb-api/src/http/files/download.rs b/backend/crates/kalamdb-api/src/http/files/download.rs index fa15cebc0..9f3578b68 100644 --- a/backend/crates/kalamdb-api/src/http/files/download.rs +++ b/backend/crates/kalamdb-api/src/http/files/download.rs @@ -4,20 +4,26 @@ use std::sync::Arc; use actix_web::{get, web, HttpResponse, Responder}; use kalamdb_auth::AuthSessionExtractor; -use kalamdb_commons::{models::TableId, schemas::TableType, TableAccess}; +use kalamdb_commons::{ + models::{TableId, UserId}, + schemas::TableType, + Role, TableAccess, +}; use kalamdb_core::app_context::AppContext; -use kalamdb_session::{can_access_shared_table, can_impersonate_target_user, AuthSession}; +use kalamdb_session::{ + can_access_shared_table, can_access_user_table, can_impersonate_target_user, AuthSession, +}; use kalamdb_system::FileRef; use super::models::DownloadQuery; use crate::http::sql::models::{ErrorCode, SqlResponse}; -/// GET /v1/files/{namespace}/{table_name}/{subfolder}/{file_id} - Download a file +/// GET /v1/files/{namespace}/{table_name}/{subfolder}/{stored_name} - Download a file /// /// Requires Bearer token (JWT) authorization and table access permissions. /// For user tables, downloads default to the authenticated user's table scope. -/// Higher roles may supply `user_id` when the impersonation role matrix allows it. -#[get("/files/{namespace}/{table_name}/{subfolder}/{file_id}")] +/// DBA/system roles may supply `user_id` when the impersonation role matrix allows it. 
+#[get("/files/{namespace}/{table_name}/{subfolder}/{stored_name}")] pub async fn download_file( extractor: AuthSessionExtractor, path: web::Path<(String, String, String, String)>, @@ -27,7 +33,7 @@ pub async fn download_file( // Convert extractor to AuthSession let session: AuthSession = extractor.into(); - let (namespace, table_name, subfolder, file_id) = path.into_inner(); + let (namespace, table_name, subfolder, stored_name) = path.into_inner(); let table_id = TableId::from_strings(&namespace, &table_name); // Look up table definition from schema registry @@ -46,31 +52,29 @@ pub async fn download_file( let user_id = match table_type { TableType::User => { + if !can_access_user_table(session.role()) { + return HttpResponse::Forbidden().json(SqlResponse::error( + ErrorCode::PermissionDenied, + "User table file downloads require user-table access", + 0.0, + )); + } + let effective_user_id = if let Some(requested_user_id) = query.user_id.as_ref() { - if requested_user_id == session.user_id() { - requested_user_id.clone() - } else { - let requested_user_id = requested_user_id.clone(); - let target_role = app_context - .system_tables() - .users() - .role_for_impersonation_target(&requested_user_id); - - if !can_impersonate_target_user( - session.user_id(), - session.role(), - &requested_user_id, - target_role, - ) { - return HttpResponse::Forbidden().json(SqlResponse::error( - ErrorCode::PermissionDenied, - "Requested user is not allowed for the current role", - 0.0, - )); - } - - requested_user_id + let target_role = app_context + .system_tables() + .users() + .role_for_impersonation_target(requested_user_id); + + if !can_download_user_file_for_target(&session, requested_user_id, target_role) { + return HttpResponse::Forbidden().json(SqlResponse::error( + ErrorCode::PermissionDenied, + "Requested user is not allowed for file download", + 0.0, + )); } + + requested_user_id.clone() } else { session.user_id().clone() }; @@ -113,10 +117,10 @@ pub async fn download_file( || subfolder.contains('/') || subfolder.contains('\\') || subfolder.contains('\0') - || file_id.contains("..") - || file_id.contains('/') - || file_id.contains('\\') - || file_id.contains('\0') + || stored_name.contains("..") + || stored_name.contains('/') + || stored_name.contains('\\') + || stored_name.contains('\0') { return HttpResponse::BadRequest().json(SqlResponse::error( ErrorCode::InvalidInput, @@ -124,7 +128,7 @@ pub async fn download_file( 0.0, )); } - let relative_path = format!("{}/{}", subfolder, file_id); + let relative_path = format!("{}/{}", subfolder, stored_name); // Fetch file from storage let file_service = app_context.file_storage_service(); @@ -134,12 +138,12 @@ pub async fn download_file( { Ok(data) => { // TODO: Get content type from the stored file metadata - // Guess content type from file extension in file_id - let content_type = guess_content_type(&file_id); + // Guess content type from file extension in stored_name + let content_type = guess_content_type(&stored_name); - // SECURITY: Sanitize file_id for Content-Disposition header to prevent + // SECURITY: Sanitize stored_name for Content-Disposition header to prevent // HTTP response header injection (CRLF injection) via crafted filenames. 
- let safe_file_id: String = file_id + let safe_stored_name: String = stored_name .chars() .filter(|c| *c != '"' && *c != '\r' && *c != '\n' && *c != '\0') .collect(); @@ -147,12 +151,12 @@ pub async fn download_file( .content_type(content_type) .append_header(( "Content-Disposition", - format!("inline; filename=\"{}\"", safe_file_id), + format!("inline; filename=\"{}\"", safe_stored_name), )) .body(data) }, Err(e) => { - log::warn!("File download failed: table={}, file={}: {}", table_id, file_id, e); + log::warn!("File download failed: table={}, file={}: {}", table_id, stored_name, e); HttpResponse::NotFound().json(serde_json::json!({ "error": "File not found", "code": "FILE_NOT_FOUND", @@ -161,26 +165,45 @@ pub async fn download_file( } } -fn guess_content_type(file_id: &str) -> String { - mime_guess::from_path(file_id).first_or_octet_stream().to_string() +fn guess_content_type(stored_name: &str) -> String { + mime_guess::from_path(stored_name).first_or_octet_stream().to_string() +} + +fn can_download_user_file_for_target( + session: &AuthSession, + requested_user_id: &UserId, + target_role: Role, +) -> bool { + if requested_user_id == session.user_id() { + return true; + } + + matches!(session.role(), Role::System | Role::Dba) + && can_impersonate_target_user( + session.user_id(), + session.role(), + requested_user_id, + target_role, + ) } #[cfg(test)] mod tests { - use kalamdb_commons::models::UserId; - use kalamdb_commons::Role; - use super::*; #[test] - fn download_user_id_query_uses_shared_impersonation_authorization() { - let actor = UserId::new("svc"); - let same_user = UserId::new("svc"); + fn download_user_id_query_is_self_or_admin_only() { + let service = AuthSession::new(UserId::new("svc"), Role::Service); + let dba = AuthSession::new(UserId::new("dba"), Role::Dba); + let system = AuthSession::new(UserId::new("system"), Role::System); + let user = AuthSession::new(UserId::new("alice"), Role::User); let regular_target = UserId::new("alice"); let dba_target = UserId::new("dba-target"); - assert!(can_impersonate_target_user(&actor, Role::Service, &same_user, Role::Service)); - assert!(can_impersonate_target_user(&actor, Role::Service, ®ular_target, Role::User)); - assert!(!can_impersonate_target_user(&actor, Role::Service, &dba_target, Role::Dba)); + assert!(can_download_user_file_for_target(&user, ®ular_target, Role::User)); + assert!(!can_download_user_file_for_target(&service, ®ular_target, Role::User)); + assert!(can_download_user_file_for_target(&dba, ®ular_target, Role::User)); + assert!(can_download_user_file_for_target(&dba, &dba_target, Role::Dba)); + assert!(can_download_user_file_for_target(&system, &dba_target, Role::Dba)); } } diff --git a/backend/crates/kalamdb-api/src/http/files/models/download_query.rs b/backend/crates/kalamdb-api/src/http/files/models/download_query.rs index aa4db0f47..3aff6bed0 100644 --- a/backend/crates/kalamdb-api/src/http/files/models/download_query.rs +++ b/backend/crates/kalamdb-api/src/http/files/models/download_query.rs @@ -7,7 +7,7 @@ use serde::Deserialize; #[derive(Debug, Deserialize)] pub struct DownloadQuery { /// Optional user_id for user-table downloads. - /// Cross-user requests are authorized through the impersonation role matrix. + /// Cross-user raw byte downloads are limited to dba/system roles. 
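As an aside, a minimal client-side sketch of calling the download route shown above; the host, bearer token, table path, and stored file name are hypothetical, and the optional `user_id` query parameter is only honored for dba/system callers, as described in the handler.

```rust
// Hypothetical call against GET /v1/files/{namespace}/{table_name}/{subfolder}/{stored_name}.
// Host, token, table, and file names are made up; assumes reqwest and tokio as dev dependencies.
use std::error::Error;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let client = reqwest::Client::new();
    let bytes = client
        .get("http://localhost:8080/v1/files/app/messages/avatars/profile.png")
        .bearer_auth("JWT_TOKEN_HERE")
        // Optional: dba/system roles may request another user's table scope.
        .query(&[("user_id", "alice")])
        .send()
        .await?
        .error_for_status()?
        .bytes()
        .await?;
    println!("downloaded {} bytes", bytes.len());
    Ok(())
}
```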
#[serde(default, deserialize_with = "deserialize_optional_user_id")] pub user_id: Option, } diff --git a/backend/crates/kalamdb-api/src/http/topics/consume.rs b/backend/crates/kalamdb-api/src/http/topics/consume.rs index 7a9db63d8..5f3058865 100644 --- a/backend/crates/kalamdb-api/src/http/topics/consume.rs +++ b/backend/crates/kalamdb-api/src/http/topics/consume.rs @@ -59,7 +59,7 @@ pub async fn consume_handler( } let topic_id = &body.topic_id; - let group_id = &body.group_id; + let group_id = body.group_id.as_ref(); // Verify topic exists let topics_provider = app_context.system_tables().topics(); @@ -82,19 +82,22 @@ pub async fn consume_handler( // Determine start offset based on position. // - // All positions first check the consumer group's committed offset. - // If a committed offset exists, we resume from there (last_acked + 1). - // The position only matters when no offset has been committed yet: + // Grouped reads first check the consumer group's committed offset. If a + // committed offset exists, we resume from there (last_acked + 1). The + // requested position only matters when no offset has been committed yet. + // Stateless reads do not check or create group state, so the requested + // position is honored on every call: // - Earliest: start from offset 0 (replay all history) // - Latest: start from high-water mark (last offset + 1) // - Offset: start from the explicit offset - let committed_offset = + let committed_offset = group_id.and_then(|group_id| { topic_publisher.get_group_offsets(topic_id, group_id).ok().and_then(|offsets| { offsets .iter() .find(|o| o.partition_id == body.partition_id) .map(|o| o.last_acked_offset + 1) - }); + }) + }); let start_offset = match committed_offset { Some(committed) => committed, @@ -118,14 +121,25 @@ pub async fn consume_handler( }, }; - // Fetch messages - let messages = match topic_publisher.fetch_messages_for_group( - topic_id, - group_id, - body.partition_id, - start_offset, - body.limit as usize, - ) { + // Fetch messages. + let messages_result = if let Some(group_id) = group_id { + topic_publisher.fetch_messages_for_group( + topic_id, + group_id, + body.partition_id, + start_offset, + body.limit as usize, + ) + } else { + topic_publisher.fetch_messages( + topic_id, + body.partition_id, + start_offset, + body.limit as usize, + ) + }; + + let messages = match messages_result { Ok(msgs) => msgs, Err(e) => { return HttpResponse::InternalServerError().json(TopicErrorResponse::internal_error( diff --git a/backend/crates/kalamdb-api/src/http/topics/latest_offsets.rs b/backend/crates/kalamdb-api/src/http/topics/latest_offsets.rs new file mode 100644 index 000000000..117b2fdd7 --- /dev/null +++ b/backend/crates/kalamdb-api/src/http/topics/latest_offsets.rs @@ -0,0 +1,83 @@ +//! Topic latest offsets handler +//! +//! POST /v1/api/topics/latest-offsets - Resolve topic partition head offsets + +use std::collections::BTreeSet; +use std::sync::Arc; + +use actix_web::{post, web, HttpResponse, Responder}; +use kalamdb_auth::AuthSessionExtractor; +use kalamdb_commons::Role; +use kalamdb_core::app_context::AppContext; +use kalamdb_session::AuthSession; + +use super::models::{ + LatestOffsetsRequest, LatestOffsetsResponse, TopicErrorResponse, TopicPartitionLatestOffset, +}; + +/// Check if role is allowed to resolve topic offsets. 
+/// Must be service, dba, or system role (NOT user) +fn is_topic_authorized(session: &AuthSession) -> bool { + matches!(session.role(), Role::Service | Role::Dba | Role::System) +} + +/// POST /v1/api/topics/latest-offsets - Resolve topic partition head offsets +/// +/// # Authentication +/// Requires Bearer token authentication. +/// +/// # Authorization +/// Role must be `service`, `dba`, or `system` (NOT `user`). +#[post("/latest-offsets")] +pub async fn latest_offsets_handler( + extractor: AuthSessionExtractor, + body: web::Json, + app_context: web::Data>, +) -> impl Responder { + let session: AuthSession = extractor.into(); + + if !is_topic_authorized(&session) { + return HttpResponse::Forbidden().json(TopicErrorResponse::forbidden( + "Topic offset inspection requires service, dba, or system role", + )); + } + + let topic_publisher = app_context.topic_publisher(); + let mut seen = BTreeSet::new(); + let mut offsets = Vec::with_capacity(body.partitions.len()); + + for selector in &body.partitions { + let dedupe_key = (selector.topic_id.to_string(), selector.partition_id); + if !seen.insert(dedupe_key) { + continue; + } + + let last_offset = match topic_publisher.latest_offset(&selector.topic_id, selector.partition_id) + { + Ok(offset) => offset, + Err(error) => { + return HttpResponse::InternalServerError().json(TopicErrorResponse::internal_error( + &format!("Failed to resolve latest offset: {}", error), + )); + }, + }; + + offsets.push(TopicPartitionLatestOffset { + topic_id: selector.topic_id.clone(), + partition_id: selector.partition_id, + next_offset: last_offset.map(|offset| offset + 1).unwrap_or(0), + last_offset, + }); + } + + offsets.sort_by(|left, right| { + let topic_compare = left.topic_id.to_string().cmp(&right.topic_id.to_string()); + if topic_compare == std::cmp::Ordering::Equal { + left.partition_id.cmp(&right.partition_id) + } else { + topic_compare + } + }); + + HttpResponse::Ok().json(LatestOffsetsResponse { offsets }) +} \ No newline at end of file diff --git a/backend/crates/kalamdb-api/src/http/topics/mod.rs b/backend/crates/kalamdb-api/src/http/topics/mod.rs index 17c65e6c2..2fb2399e3 100644 --- a/backend/crates/kalamdb-api/src/http/topics/mod.rs +++ b/backend/crates/kalamdb-api/src/http/topics/mod.rs @@ -6,6 +6,7 @@ //! ## Endpoints //! - POST /v1/api/topics/consume - Consume messages from a topic //! - POST /v1/api/topics/ack - Acknowledge offset for consumer group +//! - POST /v1/api/topics/latest-offsets - Resolve topic partition head offsets //! //! **Authorization**: Endpoints require `service`, `dba`, or `system` role (NOT `user`). @@ -13,6 +14,8 @@ pub mod models; mod ack; mod consume; +mod latest_offsets; pub(crate) use ack::ack_handler; pub(crate) use consume::consume_handler; +pub(crate) use latest_offsets::latest_offsets_handler; diff --git a/backend/crates/kalamdb-api/src/http/topics/models/consume_request.rs b/backend/crates/kalamdb-api/src/http/topics/models/consume_request.rs index c1fb93173..7d794a243 100644 --- a/backend/crates/kalamdb-api/src/http/topics/models/consume_request.rs +++ b/backend/crates/kalamdb-api/src/http/topics/models/consume_request.rs @@ -19,9 +19,9 @@ pub struct ConsumeRequest { /// Topic identifier (type-safe) #[serde(deserialize_with = "deserialize_topic_id")] pub topic_id: TopicId, - /// Consumer group identifier (type-safe) - #[serde(deserialize_with = "deserialize_consumer_group_id")] - pub group_id: ConsumerGroupId, + /// Consumer group identifier (type-safe). Omit for stateless inspection reads. 
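To make the new request shapes concrete, here is a hedged sketch of the JSON bodies the topics endpoints accept after this change: a grouped consume (which resumes from the committed offset when one exists), a stateless consume with `group_id` omitted (the requested position is honored on every call), and a `latest-offsets` lookup. Field names follow the models in this diff; the topic and group names are illustrative.

```rust
// Illustrative request bodies only; topic and group names are made up.
use serde_json::json;

fn main() {
    // POST /v1/api/topics/consume with a consumer group: resumes from the
    // committed offset if one exists, otherwise honors `start`.
    let grouped = json!({
        "topic_id": "app.messages",
        "group_id": "ai-service",
        "partition_id": 0,
        "start": "Earliest",
        "limit": 100
    });

    // Same endpoint without `group_id`: a stateless inspection read that does
    // not touch group state, so `start` is honored on every call.
    let stateless = json!({
        "topic_id": "app.messages",
        "partition_id": 0,
        "start": { "Offset": 42 },
        "limit": 100
    });

    // POST /v1/api/topics/latest-offsets: resolve head offsets per partition.
    let latest = json!({
        "partitions": [
            { "topic_id": "app.messages", "partition_id": 0 },
            { "topic_id": "app.messages", "partition_id": 1 }
        ]
    });

    println!("{grouped}\n{stateless}\n{latest}");
}
```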
+ #[serde(default, deserialize_with = "deserialize_optional_consumer_group_id")] + pub group_id: Option, /// Starting position: "Latest", "Earliest", or {"Offset": 12345} #[serde(default = "default_start_position")] pub start: StartPosition, @@ -44,10 +44,12 @@ where Ok(TopicId::new(&s)) } -fn deserialize_consumer_group_id<'de, D>(deserializer: D) -> Result +fn deserialize_optional_consumer_group_id<'de, D>( + deserializer: D, +) -> Result, D::Error> where D: serde::Deserializer<'de>, { - let s = String::deserialize(deserializer)?; - Ok(ConsumerGroupId::new(&s)) + let value = Option::::deserialize(deserializer)?; + Ok(value.filter(|s| !s.trim().is_empty()).map(|s| ConsumerGroupId::new(&s))) } diff --git a/backend/crates/kalamdb-api/src/http/topics/models/latest_offsets_request.rs b/backend/crates/kalamdb-api/src/http/topics/models/latest_offsets_request.rs new file mode 100644 index 000000000..a7b0c36e1 --- /dev/null +++ b/backend/crates/kalamdb-api/src/http/topics/models/latest_offsets_request.rs @@ -0,0 +1,13 @@ +//! Latest offsets request model + +use serde::Deserialize; + +use super::TopicPartitionSelector; + +/// Request body for POST /api/topics/latest-offsets +#[derive(Debug, Deserialize)] +pub struct LatestOffsetsRequest { + /// Topic partitions to resolve. + #[serde(default)] + pub partitions: Vec, +} \ No newline at end of file diff --git a/backend/crates/kalamdb-api/src/http/topics/models/latest_offsets_response.rs b/backend/crates/kalamdb-api/src/http/topics/models/latest_offsets_response.rs new file mode 100644 index 000000000..73232b638 --- /dev/null +++ b/backend/crates/kalamdb-api/src/http/topics/models/latest_offsets_response.rs @@ -0,0 +1,12 @@ +//! Latest offsets response model + +use serde::Serialize; + +use super::TopicPartitionLatestOffset; + +/// Response body for POST /api/topics/latest-offsets +#[derive(Debug, Serialize)] +pub struct LatestOffsetsResponse { + /// Latest offsets for the requested topic partitions. + pub offsets: Vec, +} \ No newline at end of file diff --git a/backend/crates/kalamdb-api/src/http/topics/models/mod.rs b/backend/crates/kalamdb-api/src/http/topics/models/mod.rs index a96935712..71a663ab4 100644 --- a/backend/crates/kalamdb-api/src/http/topics/models/mod.rs +++ b/backend/crates/kalamdb-api/src/http/topics/models/mod.rs @@ -7,7 +7,11 @@ mod ack_response; mod consume_request; mod consume_response; mod error_response; +mod latest_offsets_request; +mod latest_offsets_response; mod start_position; +mod topic_partition_latest_offset; +mod topic_partition_selector; mod topic_message; pub use ack_request::AckRequest; @@ -15,5 +19,9 @@ pub use ack_response::AckResponse; pub use consume_request::ConsumeRequest; pub use consume_response::ConsumeResponse; pub use error_response::TopicErrorResponse; +pub use latest_offsets_request::LatestOffsetsRequest; +pub use latest_offsets_response::LatestOffsetsResponse; pub use start_position::StartPosition; +pub use topic_partition_latest_offset::TopicPartitionLatestOffset; +pub use topic_partition_selector::TopicPartitionSelector; pub use topic_message::TopicMessage; diff --git a/backend/crates/kalamdb-api/src/http/topics/models/topic_partition_latest_offset.rs b/backend/crates/kalamdb-api/src/http/topics/models/topic_partition_latest_offset.rs new file mode 100644 index 000000000..d3fd1752d --- /dev/null +++ b/backend/crates/kalamdb-api/src/http/topics/models/topic_partition_latest_offset.rs @@ -0,0 +1,17 @@ +//! 
Topic partition latest offset model + +use kalamdb_commons::models::TopicId; +use serde::Serialize; + +/// Latest committed head offset for a topic partition. +#[derive(Debug, Serialize)] +pub struct TopicPartitionLatestOffset { + /// Topic identifier. + pub topic_id: TopicId, + /// Partition identifier. + pub partition_id: u32, + /// Next offset after the latest visible message for the partition. + pub next_offset: u64, + /// Latest visible message offset, if the partition has messages. + pub last_offset: Option, +} \ No newline at end of file diff --git a/backend/crates/kalamdb-api/src/http/topics/models/topic_partition_selector.rs b/backend/crates/kalamdb-api/src/http/topics/models/topic_partition_selector.rs new file mode 100644 index 000000000..65064040a --- /dev/null +++ b/backend/crates/kalamdb-api/src/http/topics/models/topic_partition_selector.rs @@ -0,0 +1,23 @@ +//! Topic partition selector model + +use kalamdb_commons::models::TopicId; +use serde::Deserialize; + +/// Topic + partition selector for batched topic offset lookups. +#[derive(Debug, Clone, Deserialize)] +pub struct TopicPartitionSelector { + /// Topic identifier (type-safe) + #[serde(deserialize_with = "deserialize_topic_id")] + pub topic_id: TopicId, + /// Partition ID (default 0) + #[serde(default)] + pub partition_id: u32, +} + +fn deserialize_topic_id<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + Ok(TopicId::new(&s)) +} \ No newline at end of file diff --git a/backend/crates/kalamdb-api/src/routes.rs b/backend/crates/kalamdb-api/src/routes.rs index 00d4009a3..fd0b734f8 100644 --- a/backend/crates/kalamdb-api/src/routes.rs +++ b/backend/crates/kalamdb-api/src/routes.rs @@ -67,6 +67,7 @@ pub fn configure_routes(cfg: &mut web::ServiceConfig) { .service( web::scope("/topics") .service(http::topics::consume_handler) + .service(http::topics::latest_offsets_handler) .service(http::topics::ack_handler), ), ) diff --git a/backend/crates/kalamdb-dialect/src/classifier/engine/core.rs b/backend/crates/kalamdb-dialect/src/classifier/engine/core.rs index b5c36c5dd..a285850bb 100644 --- a/backend/crates/kalamdb-dialect/src/classifier/engine/core.rs +++ b/backend/crates/kalamdb-dialect/src/classifier/engine/core.rs @@ -589,6 +589,18 @@ impl SqlStatement { crate::ddl::topic_commands::parse_ack(sql).map(SqlStatementKind::AckTopic) }) }, + ["RESET", "CONSUMER", "GROUP", ..] => { + if !is_admin { + return Err(StatementClassificationError::Unauthorized( + "Admin privileges (DBA or System role) required for consumer group reset" + .to_string(), + )); + } + Self::wrap(sql, || { + crate::ddl::topic_commands::parse_reset_consumer_group(sql) + .map(SqlStatementKind::ResetConsumerGroup) + }) + }, // Backup and restore operations - require admin ["BACKUP", "DATABASE", ..] 
=> { @@ -843,9 +855,12 @@ impl SqlStatement { SqlStatementKind::CreateTopic(_) | SqlStatementKind::DropTopic(_) | SqlStatementKind::ClearTopic(_) - | SqlStatementKind::AddTopicSource(_) => Err("Admin privileges (DBA or System role) \ + | SqlStatementKind::AddTopicSource(_) + | SqlStatementKind::ResetConsumerGroup(_) => { + Err("Admin privileges (DBA or System role) \ required for topic management" - .to_string()), + .to_string()) + }, // Backup/Restore requires admin SqlStatementKind::BackupDatabase(_) | SqlStatementKind::RestoreDatabase(_) => { diff --git a/backend/crates/kalamdb-dialect/src/classifier/types.rs b/backend/crates/kalamdb-dialect/src/classifier/types.rs index d94deee78..61271e4b5 100644 --- a/backend/crates/kalamdb-dialect/src/classifier/types.rs +++ b/backend/crates/kalamdb-dialect/src/classifier/types.rs @@ -137,6 +137,8 @@ pub enum SqlStatementKind { ConsumeTopic(ConsumeStatement), /// ACK GROUP '' [PARTITION ] UPTO OFFSET AckTopic(AckStatement), + /// RESET CONSUMER GROUP '' ON [PARTITION ] TO + ResetConsumerGroup(ResetConsumerGroupStatement), // ===== User Management ===== /// CREATE USER WITH ... @@ -285,6 +287,7 @@ impl SqlStatement { | SqlStatementKind::DropTopic(_) | SqlStatementKind::ClearTopic(_) | SqlStatementKind::AddTopicSource(_) + | SqlStatementKind::ResetConsumerGroup(_) | SqlStatementKind::CreateUser(_) | SqlStatementKind::AlterUser(_) | SqlStatementKind::DropUser(_) @@ -350,6 +353,7 @@ impl SqlStatement { SqlStatementKind::AddTopicSource(_) => "ALTER TOPIC ADD SOURCE", SqlStatementKind::ConsumeTopic(_) => "CONSUME FROM", SqlStatementKind::AckTopic(_) => "ACK", + SqlStatementKind::ResetConsumerGroup(_) => "RESET CONSUMER GROUP", SqlStatementKind::CreateUser(_) => "CREATE USER", SqlStatementKind::AlterUser(_) => "ALTER USER", SqlStatementKind::DropUser(_) => "DROP USER", diff --git a/backend/crates/kalamdb-dialect/src/ddl.rs b/backend/crates/kalamdb-dialect/src/ddl.rs index 60a9a96e7..0ffa0d3ba 100644 --- a/backend/crates/kalamdb-dialect/src/ddl.rs +++ b/backend/crates/kalamdb-dialect/src/ddl.rs @@ -62,7 +62,7 @@ pub use storage_commands::{ pub use subscribe_commands::SubscribeStatement; pub use topic_commands::{ AckStatement, AddTopicSourceStatement, ClearTopicStatement, ConsumePosition, ConsumeStatement, - CreateTopicStatement, DropTopicStatement, + CreateTopicStatement, DropTopicStatement, ResetConsumerGroupStatement, }; pub use use_namespace::UseNamespaceStatement; pub use user_commands::{ diff --git a/backend/crates/kalamdb-dialect/src/ddl/topic_commands.rs b/backend/crates/kalamdb-dialect/src/ddl/topic_commands.rs index d165c42df..a2c571708 100644 --- a/backend/crates/kalamdb-dialect/src/ddl/topic_commands.rs +++ b/backend/crates/kalamdb-dialect/src/ddl/topic_commands.rs @@ -5,6 +5,7 @@ //! - DROP TOPIC: Remove a topic //! - ALTER TOPIC ADD SOURCE: Add a table route to a topic //! - CONSUME FROM: Consume messages from a topic +//! - RESET CONSUMER GROUP: Move a consumer group cursor to a specific next offset use kalamdb_commons::models::{PayloadMode, TableId, TopicOp}; @@ -111,6 +112,23 @@ pub struct AckStatement { pub upto_offset: u64, } +/// RESET CONSUMER GROUP statement for moving a group cursor. 
+/// +/// Syntax: +/// ```sql +/// RESET CONSUMER GROUP '' +/// ON +/// [PARTITION ] +/// TO ; +/// ``` +#[derive(Debug, Clone, PartialEq)] +pub struct ResetConsumerGroupStatement { + pub topic_name: String, + pub group_id: String, + pub partition_id: u32, + pub next_offset: u64, +} + // Implement DdlAst trait for all topic statement types impl DdlAst for CreateTopicStatement {} impl DdlAst for DropTopicStatement {} @@ -118,6 +136,7 @@ impl DdlAst for ClearTopicStatement {} impl DdlAst for AddTopicSourceStatement {} impl DdlAst for ConsumeStatement {} impl DdlAst for AckStatement {} +impl DdlAst for ResetConsumerGroupStatement {} /// Parse CREATE TOPIC statement /// @@ -335,6 +354,39 @@ pub fn parse_ack(sql: &str) -> Result { }) } +/// Parse RESET CONSUMER GROUP statement. +/// +/// Syntax: RESET CONSUMER GROUP '' ON [PARTITION ] TO +pub fn parse_reset_consumer_group(sql: &str) -> Result { + let normalized = normalize_sql(sql); + let sql_upper = normalized.to_uppercase(); + + if !sql_upper.starts_with("RESET CONSUMER GROUP ") { + return Err("Expected RESET CONSUMER GROUP statement".to_string()); + } + + let group_id = extract_reset_group_id(&normalized)?; + let topic_name = extract_reset_topic_name(&normalized)?; + + let partition_id = if sql_upper.contains(" PARTITION ") { + let partition_str = extract_keyword_value(&normalized, "PARTITION")?; + partition_str + .parse::() + .map_err(|_| "PARTITION must be a non-negative integer".to_string())? + } else { + 0 + }; + + let next_offset = extract_reset_next_offset(&normalized)?; + + Ok(ResetConsumerGroupStatement { + topic_name, + group_id, + partition_id, + next_offset, + }) +} + // Helper functions // TODO: We aready have a method inside tableId for this. Refactor to use that. @@ -444,6 +496,67 @@ fn extract_group_id(sql: &str) -> Result { Ok(group_id) } +fn extract_reset_group_id(sql: &str) -> Result { + let sql_upper = sql.to_uppercase(); + let prefix = "RESET CONSUMER GROUP "; + let group_pos = sql_upper + .find(prefix) + .ok_or_else(|| "RESET CONSUMER GROUP keyword not found".to_string())?; + let after_group = &sql[group_pos + prefix.len()..]; + + if after_group.starts_with('\'') { + let end_quote = after_group[1..] + .find('\'') + .ok_or_else(|| "Unclosed quote in group ID".to_string())?; + return Ok(after_group[1..end_quote + 1].to_string()); + } + + if after_group.starts_with('"') { + let end_quote = after_group[1..] + .find('"') + .ok_or_else(|| "Unclosed quote in group ID".to_string())?; + return Ok(after_group[1..end_quote + 1].to_string()); + } + + let group_id = after_group + .split_whitespace() + .next() + .ok_or_else(|| "Missing consumer group ID".to_string())?; + Ok(group_id.to_string()) +} + +fn extract_reset_topic_name(sql: &str) -> Result { + let sql_upper = sql.to_uppercase(); + let on_pos = sql_upper.find(" ON ").ok_or_else(|| "RESET requires ON ".to_string())?; + let after_on = &sql[on_pos + 4..]; + let topic_name = after_on + .split_whitespace() + .next() + .ok_or_else(|| "Missing topic name after ON".to_string())? + .trim_end_matches(';'); + + if topic_name.is_empty() { + return Err("Missing topic name after ON".to_string()); + } + + Ok(topic_name.to_string()) +} + +fn extract_reset_next_offset(sql: &str) -> Result { + let sql_upper = sql.to_uppercase(); + let to_pos = sql_upper.find(" TO ").ok_or_else(|| "RESET requires TO ".to_string())?; + let after_to = sql[to_pos + 4..].trim(); + let offset_str = after_to + .split_whitespace() + .next() + .ok_or_else(|| "Missing offset after TO".to_string())? 
+ .trim_end_matches(';'); + + offset_str + .parse::() + .map_err(|_| format!("Invalid offset '{}'. Must be a non-negative integer", offset_str)) +} + fn parse_consume_position_from_sql(sql: &str) -> Result { let sql_upper = sql.to_uppercase(); @@ -539,4 +652,27 @@ mod tests { assert_eq!(stmt.partition_id, 2); assert_eq!(stmt.upto_offset, 500); } + + #[test] + fn test_parse_reset_consumer_group_basic() { + let stmt = + parse_reset_consumer_group("RESET CONSUMER GROUP 'ai-service' ON app.messages TO 0") + .unwrap(); + assert_eq!(stmt.topic_name, "app.messages"); + assert_eq!(stmt.group_id, "ai-service"); + assert_eq!(stmt.partition_id, 0); + assert_eq!(stmt.next_offset, 0); + } + + #[test] + fn test_parse_reset_consumer_group_with_partition() { + let stmt = parse_reset_consumer_group( + "RESET CONSUMER GROUP 'ai-service' ON app.messages PARTITION 2 TO 500;", + ) + .unwrap(); + assert_eq!(stmt.topic_name, "app.messages"); + assert_eq!(stmt.group_id, "ai-service"); + assert_eq!(stmt.partition_id, 2); + assert_eq!(stmt.next_offset, 500); + } } diff --git a/backend/crates/kalamdb-dialect/src/parser/extensions.rs b/backend/crates/kalamdb-dialect/src/parser/extensions.rs index 21ab70505..236841037 100644 --- a/backend/crates/kalamdb-dialect/src/parser/extensions.rs +++ b/backend/crates/kalamdb-dialect/src/parser/extensions.rs @@ -36,7 +36,7 @@ pub use crate::ddl::subscribe_commands::SubscribeStatement; // Topic pub/sub commands pub use crate::ddl::topic_commands::{ AddTopicSourceStatement, ClearTopicStatement, ConsumePosition, ConsumeStatement, - CreateTopicStatement, DropTopicStatement, + CreateTopicStatement, DropTopicStatement, ResetConsumerGroupStatement, }; // User commands (CREATE USER, ALTER USER, DROP USER) pub use crate::ddl::user_commands::{ @@ -79,6 +79,8 @@ pub enum ExtensionStatement { AddTopicSource(AddTopicSourceStatement), /// CONSUME FROM command (pub/sub) ConsumeTopic(ConsumeStatement), + /// RESET CONSUMER GROUP command (pub/sub) + ResetConsumerGroup(ResetConsumerGroupStatement), /// CREATE USER command CreateUser(CreateUserStatement), /// ALTER USER command @@ -294,6 +296,16 @@ impl ExtensionStatement { ) { return result; } + if let Some(result) = Self::parse_with_prefix( + sql, + &sql_upper, + &["RESET CONSUMER GROUP"], + crate::ddl::topic_commands::parse_reset_consumer_group, + ExtensionStatement::ResetConsumerGroup, + "RESET CONSUMER GROUP", + ) { + return result; + } if let Some(result) = Self::parse_with_prefix( sql, &sql_upper, diff --git a/backend/crates/kalamdb-handlers/crates/stream/src/lib.rs b/backend/crates/kalamdb-handlers/crates/stream/src/lib.rs index dd9dce7f0..2e846f090 100644 --- a/backend/crates/kalamdb-handlers/crates/stream/src/lib.rs +++ b/backend/crates/kalamdb-handlers/crates/stream/src/lib.rs @@ -9,7 +9,7 @@ use kalamdb_sql::{ classifier::SqlStatementKind, ddl::{ AckStatement, AddTopicSourceStatement, ClearTopicStatement, ConsumePosition, - ConsumeStatement, CreateTopicStatement, DropTopicStatement, + ConsumeStatement, CreateTopicStatement, DropTopicStatement, ResetConsumerGroupStatement, }, }; @@ -77,7 +77,19 @@ pub fn register_stream_handlers(registry: &HandlerRegistry, app_context: Arc Result { + let schema = Arc::new(Schema::new(vec![ + Field::new("topic", DataType::Utf8, false), + Field::new("group_id", DataType::Utf8, false), + Field::new("partition", DataType::Int32, false), + Field::new("next_offset", DataType::Int64, false), + ])); + + let mut topic_builder = StringBuilder::new(); + let mut group_builder = StringBuilder::new(); + 
topic_builder.append_value(topic_name); + group_builder.append_value(group_id); + + let batch = RecordBatch::try_new( + schema.clone(), + vec![ + Arc::new(topic_builder.finish()) as ArrayRef, + Arc::new(group_builder.finish()) as ArrayRef, + Arc::new(Int32Array::from(vec![partition_id as i32])) as ArrayRef, + Arc::new(Int64Array::from(vec![next_offset as i64])) as ArrayRef, + ], + ) + .map_err(|e| { + KalamDbError::SerializationError(format!("Failed to create RecordBatch: {}", e)) + })?; + + Ok(ExecutionResult::Rows { + batches: vec![batch], + row_count: 1, + schema: Some(schema), + }) +} diff --git a/backend/crates/kalamdb-handlers/crates/stream/src/topics/mod.rs b/backend/crates/kalamdb-handlers/crates/stream/src/topics/mod.rs index f2901e536..513bdfa7b 100644 --- a/backend/crates/kalamdb-handlers/crates/stream/src/topics/mod.rs +++ b/backend/crates/kalamdb-handlers/crates/stream/src/topics/mod.rs @@ -4,6 +4,7 @@ mod clear; mod consume; mod create; mod drop; +mod reset_consumer_group; pub use ack::AckHandler; pub use add_source::AddTopicSourceHandler; @@ -11,3 +12,4 @@ pub use clear::ClearTopicHandler; pub use consume::ConsumeHandler; pub use create::CreateTopicHandler; pub use drop::DropTopicHandler; +pub use reset_consumer_group::ResetConsumerGroupHandler; diff --git a/backend/crates/kalamdb-handlers/crates/stream/src/topics/reset_consumer_group.rs b/backend/crates/kalamdb-handlers/crates/stream/src/topics/reset_consumer_group.rs new file mode 100644 index 000000000..f82b410e0 --- /dev/null +++ b/backend/crates/kalamdb-handlers/crates/stream/src/topics/reset_consumer_group.rs @@ -0,0 +1,71 @@ +use std::sync::Arc; + +use kalamdb_commons::{ + models::{ConsumerGroupId, TopicId}, + Role, +}; +use kalamdb_core::{ + app_context::AppContext, + error::KalamDbError, + sql::{ + context::{ExecutionContext, ExecutionResult, ScalarValue}, + executor::handlers::TypedStatementHandler, + }, +}; +use kalamdb_sql::ddl::ResetConsumerGroupStatement; + +use crate::result_rows; + +pub struct ResetConsumerGroupHandler { + app_context: Arc, +} + +impl ResetConsumerGroupHandler { + pub fn new(app_context: Arc) -> Self { + Self { app_context } + } +} + +impl TypedStatementHandler for ResetConsumerGroupHandler { + async fn execute( + &self, + statement: ResetConsumerGroupStatement, + _params: Vec, + _context: &ExecutionContext, + ) -> Result { + let topic_id = TopicId::new(&statement.topic_name); + let group_id = ConsumerGroupId::new(&statement.group_id); + + let topics_provider = self.app_context.system_tables().topics(); + let _topic = topics_provider.get_topic_by_id_async(&topic_id).await?.ok_or_else(|| { + KalamDbError::NotFound(format!("Topic '{}' does not exist", statement.topic_name)) + })?; + + self.app_context + .topic_publisher() + .reset_group_offset(&topic_id, &group_id, statement.partition_id, statement.next_offset) + .map_err(|e| { + KalamDbError::InvalidOperation(format!("Failed to reset consumer group: {}", e)) + })?; + + result_rows::reset_consumer_group_result( + &statement.topic_name, + &statement.group_id, + statement.partition_id, + statement.next_offset, + ) + } + + async fn check_authorization( + &self, + _statement: &ResetConsumerGroupStatement, + context: &ExecutionContext, + ) -> Result<(), KalamDbError> { + match context.user_role() { + Role::Dba | Role::System => Ok(()), + _ => Err(KalamDbError::PermissionDenied( + "Only dba or system roles can reset consumer group offsets".to_string(), + )), + } + } +} diff --git a/backend/crates/kalamdb-jobs/src/executors/backup.rs 
b/backend/crates/kalamdb-jobs/src/executors/backup.rs index ccdcbb0f7..97ae4d9d3 100644 --- a/backend/crates/kalamdb-jobs/src/executors/backup.rs +++ b/backend/crates/kalamdb-jobs/src/executors/backup.rs @@ -72,11 +72,8 @@ impl BackupExecutor { } fn is_archive_path(path: &Path) -> bool { - let value = path - .to_string_lossy() - .trim() - .trim_end_matches(['/', '\\']) - .to_ascii_lowercase(); + let value = + path.to_string_lossy().trim().trim_end_matches(['/', '\\']).to_ascii_lowercase(); value.ends_with(".tar.gz") || value.ends_with(".tgz") } @@ -400,11 +397,8 @@ mod tests { fs::create_dir_all(source_root.join("rocksdb")).expect("rocksdb dir"); fs::create_dir_all(source_root.join("storage/app/messages")).expect("storage dir"); fs::write(source_root.join("rocksdb/CURRENT"), "manifest").expect("rocksdb file"); - fs::write( - source_root.join("storage/app/messages/part-1.parquet"), - "parquet", - ) - .expect("storage file"); + fs::write(source_root.join("storage/app/messages/part-1.parquet"), "parquet") + .expect("storage file"); fs::write(source_root.join("server.toml"), "port = 8080\n").expect("config file"); let archive_path = temp_dir.path().join("backup.tar.gz"); diff --git a/backend/crates/kalamdb-jobs/src/executors/restore.rs b/backend/crates/kalamdb-jobs/src/executors/restore.rs index 2096cb3d5..7b8419f5f 100644 --- a/backend/crates/kalamdb-jobs/src/executors/restore.rs +++ b/backend/crates/kalamdb-jobs/src/executors/restore.rs @@ -71,11 +71,8 @@ impl RestoreExecutor { } fn is_archive_path(path: &Path) -> bool { - let value = path - .to_string_lossy() - .trim() - .trim_end_matches(['/', '\\']) - .to_ascii_lowercase(); + let value = + path.to_string_lossy().trim().trim_end_matches(['/', '\\']).to_ascii_lowercase(); value.ends_with(".tar.gz") || value.ends_with(".tgz") } @@ -148,7 +145,10 @@ impl RestoreExecutor { let relative_path = relative_path.into_owned(); if relative_path.components().any(|component| { - matches!(component, Component::ParentDir | Component::RootDir | Component::Prefix(_)) + matches!( + component, + Component::ParentDir | Component::RootDir | Component::Prefix(_) + ) }) { return Err(KalamDbError::InvalidOperation( "Backup archive contains an unsafe path".to_string(), @@ -351,11 +351,8 @@ mod tests { fs::create_dir_all(source_root.join("rocksdb")).expect("rocksdb dir"); fs::create_dir_all(source_root.join("storage/app/messages")).expect("storage dir"); fs::write(source_root.join("rocksdb/CURRENT"), "manifest").expect("rocksdb file"); - fs::write( - source_root.join("storage/app/messages/part-1.parquet"), - "parquet", - ) - .expect("storage file"); + fs::write(source_root.join("storage/app/messages/part-1.parquet"), "parquet") + .expect("storage file"); let archive_path = temp_dir.path().join("restore.tar.gz"); let archive_file = File::create(&archive_path).expect("archive file"); diff --git a/backend/crates/kalamdb-jobs/src/health_monitor.rs b/backend/crates/kalamdb-jobs/src/health_monitor.rs index 7f8b71546..8ca964b85 100644 --- a/backend/crates/kalamdb-jobs/src/health_monitor.rs +++ b/backend/crates/kalamdb-jobs/src/health_monitor.rs @@ -1,4 +1,10 @@ -use std::{sync::Arc, time::Duration}; +use std::{ + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; use kalamdb_core::{app_context::AppContext, error::KalamDbError}; // Re-export the WebSocket session tracking functions from kalamdb-observability @@ -8,6 +14,13 @@ pub use kalamdb_observability::{ }; const IDLE_TRIM_GRACE: Duration = Duration::from_secs(60); 
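The hunk that follows gates idle memory trimming behind a grace period plus a minimum interval, recorded in an atomic timestamp so that concurrent callers racing past the interval check cannot all trim at once. A stripped-down sketch of that throttle pattern, separate from the KalamDB types:

```rust
// Minimal sketch (not KalamDB code) of the throttle used below: an atomic
// "last run" timestamp plus compare_exchange so that, when several callers
// pass the interval check at the same time, exactly one wins the slot.
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, SystemTime, UNIX_EPOCH};

static LAST_RUN_MS: AtomicU64 = AtomicU64::new(0);
const MIN_INTERVAL: Duration = Duration::from_secs(5 * 60);

fn now_ms() -> u64 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_millis() as u64
}

/// Returns true only for the single caller allowed to run in this window.
fn try_claim_run() -> bool {
    let now = now_ms();
    let last = LAST_RUN_MS.load(Ordering::Acquire);
    if last != 0 && now.saturating_sub(last) < MIN_INTERVAL.as_millis() as u64 {
        return false; // still inside the cool-down window
    }
    // Claim the slot; losing the race means another caller just ran.
    LAST_RUN_MS
        .compare_exchange(last, now, Ordering::AcqRel, Ordering::Acquire)
        .is_ok()
}

fn main() {
    if try_claim_run() {
        println!("performing idle maintenance");
    }
}
```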
+const IDLE_TRIM_INTERVAL: Duration = Duration::from_secs(5 * 60); + +static LAST_IDLE_TRIM_MS: AtomicU64 = AtomicU64::new(0); + +fn epoch_millis() -> u64 { + SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_millis() as u64 +} /// Monitor for system health and job statistics /// @@ -17,6 +30,11 @@ const IDLE_TRIM_GRACE: Duration = Duration::from_secs(60); pub struct HealthMonitor; impl HealthMonitor { + /// Run low-frequency idle maintenance without collecting full health metrics. + pub fn maintain_idle_resources(app_context: &AppContext) { + Self::maybe_trim_idle_memory(app_context); + } + fn maybe_trim_idle_memory(app_context: &AppContext) { let active_connections = app_context.connection_registry().connection_count(); let active_subscriptions = app_context.connection_registry().subscription_count(); @@ -26,11 +44,27 @@ impl HealthMonitor { return; } - let idle_for = idle_duration().unwrap_or(Duration::MAX); + let Some(idle_for) = idle_duration() else { + return; + }; if idle_for < IDLE_TRIM_GRACE { return; } + let now_ms = epoch_millis(); + let last_trim_ms = LAST_IDLE_TRIM_MS.load(Ordering::Acquire); + if last_trim_ms != 0 + && now_ms.saturating_sub(last_trim_ms) < IDLE_TRIM_INTERVAL.as_millis() as u64 + { + return; + } + if LAST_IDLE_TRIM_MS + .compare_exchange(last_trim_ms, now_ms, Ordering::AcqRel, Ordering::Acquire) + .is_err() + { + return; + } + let mut cleared_plan_cache = 0usize; if let Some(sql_executor) = app_context.try_sql_executor() { cleared_plan_cache = sql_executor.plan_cache_len(); @@ -43,7 +77,6 @@ impl HealthMonitor { app_context.connection_registry().trim_idle_capacity(); } kalamdb_observability::force_allocator_collection(true); - record_activity_now(); if cleared_plan_cache > 0 { log::info!( @@ -67,7 +100,6 @@ impl HealthMonitor { /// /// Logs a curated summary rendered from the same key/value rows exposed by system.stats. pub async fn log_metrics(app_context: Arc) -> Result<(), KalamDbError> { - Self::maybe_trim_idle_memory(app_context.as_ref()); let metrics = app_context.compute_metrics_async().await?; kalamdb_observability::HealthMonitor::log_system_stats(&metrics); Ok(()) diff --git a/backend/crates/kalamdb-jobs/src/jobs_manager/runner.rs b/backend/crates/kalamdb-jobs/src/jobs_manager/runner.rs index 613c208d8..084b60fb9 100644 --- a/backend/crates/kalamdb-jobs/src/jobs_manager/runner.rs +++ b/backend/crates/kalamdb-jobs/src/jobs_manager/runner.rs @@ -270,13 +270,25 @@ impl JobsManager { }; let wal_cleanup_enabled = wal_cleanup_interval.is_some(); - // Leadership check interval (for cluster mode) - let mut leadership_interval = tokio::time::interval_at( - Instant::now() + Duration::from_secs(1), - Duration::from_secs(1), - ); - leadership_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - let mut is_leader = self.is_cluster_leader().await; + // Leadership checks are only useful in cluster mode. In standalone mode this node is + // always the leader, so avoid a permanent 1s idle wake-up. 
+ let cluster_mode = app_context.executor().is_cluster_mode(); + let mut leadership_interval = if cluster_mode { + let mut interval = tokio::time::interval_at( + Instant::now() + Duration::from_secs(1), + Duration::from_secs(1), + ); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + Some(interval) + } else { + None + }; + let leadership_enabled = leadership_interval.is_some(); + let mut is_leader = if cluster_mode { + self.is_cluster_leader().await + } else { + true + }; let mut was_leader = is_leader; let max_concurrent = max_concurrent.max(1); let semaphore = Arc::new(Semaphore::new(max_concurrent)); @@ -286,7 +298,7 @@ impl JobsManager { // Adaptive idle polling (reduces CPU in empty systems) let idle_poll_min_ms: u64 = 500; - let idle_poll_max_ms: u64 = 5_000; + let idle_poll_max_ms: u64 = 30_000; let mut idle_poll_ms = idle_poll_min_ms; let mut poll_interval = tokio::time::interval_at( Instant::now() + Duration::from_millis(idle_poll_ms), @@ -320,7 +332,14 @@ impl JobsManager { // Priority 2: fallback polling for crash recovery/retries _ = poll_interval.tick() => None, // Periodic leadership check - _ = leadership_interval.tick() => { + _ = async { + if leadership_enabled { + let interval = leadership_interval + .as_mut() + .expect("leadership interval missing"); + interval.tick().await; + } + }, if leadership_enabled => { let leader_now = self.is_cluster_leader().await; if leader_now && !was_leader { log::info!("[JobLoop] This node became leader - handling failover"); @@ -335,8 +354,11 @@ impl JobsManager { // Periodic health metrics logging (all nodes) _ = health_interval.tick() => { let app_ctx = self.get_attached_app_context(); - if let Err(e) = HealthMonitor::log_metrics(app_ctx).await { - log::warn!("Failed to log health metrics: {}", e); + HealthMonitor::maintain_idle_resources(app_ctx.as_ref()); + if log::log_enabled!(Level::Debug) { + if let Err(e) = HealthMonitor::log_metrics(app_ctx).await { + log::warn!("Failed to log health metrics: {}", e); + } } continue; } diff --git a/backend/crates/kalamdb-observability/src/health_monitor.rs b/backend/crates/kalamdb-observability/src/health_monitor.rs index b8e237a77..b2182d32d 100644 --- a/backend/crates/kalamdb-observability/src/health_monitor.rs +++ b/backend/crates/kalamdb-observability/src/health_monitor.rs @@ -62,7 +62,7 @@ pub struct HealthCounts { pub jobs_total: usize, } -/// Descriptor class breakdown captured from `lsof`. +/// Descriptor class breakdown captured from process file descriptors. 
#[derive(Debug, Clone, Copy, Default)] pub struct OpenFileBreakdown { pub total: usize, @@ -362,8 +362,50 @@ impl HealthMonitor { } /// Count open file descriptors for the current process (Unix only) - #[cfg(unix)] + #[cfg(target_os = "linux")] fn collect_open_file_breakdown() -> Option { + Self::collect_proc_fd_breakdown().or_else(Self::collect_lsof_open_file_breakdown) + } + + #[cfg(all(unix, not(target_os = "linux")))] + fn collect_open_file_breakdown() -> Option { + Self::collect_lsof_open_file_breakdown() + } + + #[cfg(target_os = "linux")] + fn collect_proc_fd_breakdown() -> Option { + use std::{fs, os::unix::fs::FileTypeExt}; + + let entries = fs::read_dir("/proc/self/fd").ok()?; + let mut breakdown = OpenFileBreakdown::default(); + + for entry in entries.flatten() { + breakdown.total += 1; + + match fs::metadata(entry.path()) { + Ok(metadata) => { + let file_type = metadata.file_type(); + if file_type.is_file() { + breakdown.regular += 1; + } else if file_type.is_dir() { + breakdown.directories += 1; + } else if file_type.is_socket() { + breakdown.other += 1; + } else { + breakdown.other += 1; + } + }, + Err(_) => { + breakdown.other += 1; + }, + } + } + + Some(breakdown) + } + + #[cfg(unix)] + fn collect_lsof_open_file_breakdown() -> Option { use std::process::Command; let output = Command::new("lsof") diff --git a/backend/crates/kalamdb-observability/src/runtime_metrics.rs b/backend/crates/kalamdb-observability/src/runtime_metrics.rs index 9b5369928..9304f9e4a 100644 --- a/backend/crates/kalamdb-observability/src/runtime_metrics.rs +++ b/backend/crates/kalamdb-observability/src/runtime_metrics.rs @@ -26,6 +26,20 @@ pub struct RuntimeMetrics { pub memory_rss_gap_mb: Option, pub memory_physical_footprint_bytes: Option, pub memory_physical_footprint_mb: Option, + pub memory_linux_rollup_rss_bytes: Option, + pub memory_linux_rollup_rss_mb: Option, + pub memory_linux_pss_bytes: Option, + pub memory_linux_pss_mb: Option, + pub memory_linux_anonymous_bytes: Option, + pub memory_linux_anonymous_mb: Option, + pub memory_linux_private_clean_bytes: Option, + pub memory_linux_private_clean_mb: Option, + pub memory_linux_private_dirty_bytes: Option, + pub memory_linux_private_dirty_mb: Option, + pub memory_linux_shared_clean_bytes: Option, + pub memory_linux_shared_clean_mb: Option, + pub memory_linux_shared_dirty_bytes: Option, + pub memory_linux_shared_dirty_mb: Option, pub cpu_usage_percent: Option, pub system_total_memory_mb: u64, pub system_used_memory_mb: u64, @@ -73,6 +87,64 @@ impl RuntimeMetrics { if let Some(mb) = self.memory_physical_footprint_mb { pairs.push(("memory_physical_footprint_mb".to_string(), mb.to_string())); } + push_optional_pair( + &mut pairs, + "memory_linux_rollup_rss_bytes", + self.memory_linux_rollup_rss_bytes, + ); + push_optional_pair( + &mut pairs, + "memory_linux_rollup_rss_mb", + self.memory_linux_rollup_rss_mb, + ); + push_optional_pair(&mut pairs, "memory_linux_pss_bytes", self.memory_linux_pss_bytes); + push_optional_pair(&mut pairs, "memory_linux_pss_mb", self.memory_linux_pss_mb); + push_optional_pair( + &mut pairs, + "memory_linux_anonymous_bytes", + self.memory_linux_anonymous_bytes, + ); + push_optional_pair(&mut pairs, "memory_linux_anonymous_mb", self.memory_linux_anonymous_mb); + push_optional_pair( + &mut pairs, + "memory_linux_private_clean_bytes", + self.memory_linux_private_clean_bytes, + ); + push_optional_pair( + &mut pairs, + "memory_linux_private_clean_mb", + self.memory_linux_private_clean_mb, + ); + push_optional_pair( + &mut pairs, + 
"memory_linux_private_dirty_bytes", + self.memory_linux_private_dirty_bytes, + ); + push_optional_pair( + &mut pairs, + "memory_linux_private_dirty_mb", + self.memory_linux_private_dirty_mb, + ); + push_optional_pair( + &mut pairs, + "memory_linux_shared_clean_bytes", + self.memory_linux_shared_clean_bytes, + ); + push_optional_pair( + &mut pairs, + "memory_linux_shared_clean_mb", + self.memory_linux_shared_clean_mb, + ); + push_optional_pair( + &mut pairs, + "memory_linux_shared_dirty_bytes", + self.memory_linux_shared_dirty_bytes, + ); + push_optional_pair( + &mut pairs, + "memory_linux_shared_dirty_mb", + self.memory_linux_shared_dirty_mb, + ); if let Some(cpu) = self.cpu_usage_percent { pairs.push(("cpu_usage_percent".to_string(), format!("{:.2}", cpu))); } @@ -158,6 +230,7 @@ pub fn collect_runtime_metrics(start_time: Instant) -> RuntimeMetrics { let mut memory_rss_gap_mb = None; let mut memory_physical_footprint_bytes = None; let mut memory_physical_footprint_mb = None; + let mut linux_memory_breakdown = None; let mut memory_usage_source = "rss"; let mut cpu_usage_percent = None; #[allow(unused_mut)] @@ -181,6 +254,8 @@ pub fn collect_runtime_metrics(start_time: Instant) -> RuntimeMetrics { memory_physical_footprint_bytes.map(|bytes| bytes / 1024 / 1024); } + linux_memory_breakdown = current_linux_memory_breakdown_bytes(); + if let Some(footprint_bytes) = memory_physical_footprint_bytes { memory_usage_source = "physical_footprint"; memory_bytes = Some(footprint_bytes); @@ -209,6 +284,19 @@ pub fn collect_runtime_metrics(start_time: Instant) -> RuntimeMetrics { let system_total_memory_mb = sys.total_memory() / 1024 / 1024; let system_used_memory_mb = sys.used_memory() / 1024 / 1024; + let memory_linux_rollup_rss_bytes = linux_memory_breakdown.as_ref().and_then(|b| b.rss_bytes); + let memory_linux_pss_bytes = linux_memory_breakdown.as_ref().and_then(|b| b.pss_bytes); + let memory_linux_anonymous_bytes = + linux_memory_breakdown.as_ref().and_then(|b| b.anonymous_bytes); + let memory_linux_private_clean_bytes = + linux_memory_breakdown.as_ref().and_then(|b| b.private_clean_bytes); + let memory_linux_private_dirty_bytes = + linux_memory_breakdown.as_ref().and_then(|b| b.private_dirty_bytes); + let memory_linux_shared_clean_bytes = + linux_memory_breakdown.as_ref().and_then(|b| b.shared_clean_bytes); + let memory_linux_shared_dirty_bytes = + linux_memory_breakdown.as_ref().and_then(|b| b.shared_dirty_bytes); + RuntimeMetrics { uptime_seconds, uptime_human, @@ -223,6 +311,20 @@ pub fn collect_runtime_metrics(start_time: Instant) -> RuntimeMetrics { memory_rss_gap_mb, memory_physical_footprint_bytes, memory_physical_footprint_mb, + memory_linux_rollup_rss_bytes, + memory_linux_rollup_rss_mb: memory_linux_rollup_rss_bytes.map(bytes_to_mb), + memory_linux_pss_bytes, + memory_linux_pss_mb: memory_linux_pss_bytes.map(bytes_to_mb), + memory_linux_anonymous_bytes, + memory_linux_anonymous_mb: memory_linux_anonymous_bytes.map(bytes_to_mb), + memory_linux_private_clean_bytes, + memory_linux_private_clean_mb: memory_linux_private_clean_bytes.map(bytes_to_mb), + memory_linux_private_dirty_bytes, + memory_linux_private_dirty_mb: memory_linux_private_dirty_bytes.map(bytes_to_mb), + memory_linux_shared_clean_bytes, + memory_linux_shared_clean_mb: memory_linux_shared_clean_bytes.map(bytes_to_mb), + memory_linux_shared_dirty_bytes, + memory_linux_shared_dirty_mb: memory_linux_shared_dirty_bytes.map(bytes_to_mb), cpu_usage_percent, system_total_memory_mb, system_used_memory_mb, @@ -232,6 +334,71 @@ pub fn 
collect_runtime_metrics(start_time: Instant) -> RuntimeMetrics { } } +fn push_optional_pair(pairs: &mut Vec<(String, String)>, name: &str, value: Option) { + if let Some(value) = value { + pairs.push((name.to_string(), value.to_string())); + } +} + +fn bytes_to_mb(bytes: u64) -> u64 { + bytes / 1024 / 1024 +} + +#[derive(Debug, Default)] +struct LinuxMemoryBreakdown { + rss_bytes: Option, + pss_bytes: Option, + anonymous_bytes: Option, + private_clean_bytes: Option, + private_dirty_bytes: Option, + shared_clean_bytes: Option, + shared_dirty_bytes: Option, +} + +#[cfg(target_os = "linux")] +fn current_linux_memory_breakdown_bytes() -> Option { + let contents = std::fs::read_to_string("/proc/self/smaps_rollup").ok()?; + let mut breakdown = LinuxMemoryBreakdown::default(); + + for line in contents.lines() { + if breakdown.rss_bytes.is_none() { + breakdown.rss_bytes = parse_smaps_kb_line(line, "Rss:"); + } + if breakdown.pss_bytes.is_none() { + breakdown.pss_bytes = parse_smaps_kb_line(line, "Pss:"); + } + if breakdown.anonymous_bytes.is_none() { + breakdown.anonymous_bytes = parse_smaps_kb_line(line, "Anonymous:"); + } + if breakdown.private_clean_bytes.is_none() { + breakdown.private_clean_bytes = parse_smaps_kb_line(line, "Private_Clean:"); + } + if breakdown.private_dirty_bytes.is_none() { + breakdown.private_dirty_bytes = parse_smaps_kb_line(line, "Private_Dirty:"); + } + if breakdown.shared_clean_bytes.is_none() { + breakdown.shared_clean_bytes = parse_smaps_kb_line(line, "Shared_Clean:"); + } + if breakdown.shared_dirty_bytes.is_none() { + breakdown.shared_dirty_bytes = parse_smaps_kb_line(line, "Shared_Dirty:"); + } + } + + Some(breakdown) +} + +#[cfg(not(target_os = "linux"))] +fn current_linux_memory_breakdown_bytes() -> Option { + None +} + +#[cfg(target_os = "linux")] +fn parse_smaps_kb_line(line: &str, key: &str) -> Option { + let value = line.strip_prefix(key)?.trim(); + let kilobytes = value.strip_suffix("kB")?.trim().parse::().ok()?; + Some(kilobytes * 1024) +} + #[cfg(target_os = "macos")] pub(crate) fn current_process_physical_footprint_bytes(pid: Option) -> Option { let mut usage = std::mem::MaybeUninit::::zeroed(); diff --git a/backend/crates/kalamdb-publisher/src/service.rs b/backend/crates/kalamdb-publisher/src/service.rs index 1021bf74a..182de6d27 100644 --- a/backend/crates/kalamdb-publisher/src/service.rs +++ b/backend/crates/kalamdb-publisher/src/service.rs @@ -704,6 +704,28 @@ impl TopicPublisherService { Ok(()) } + /// Reset a consumer group partition to a specific next offset. + /// + /// This force-sets persisted progress and replaces any in-memory pending + /// claims for the same topic/group/partition so follow-up reads start at the + /// requested offset without waiting for claim expiry. + pub fn reset_group_offset( + &self, + topic_id: &TopicId, + group_id: &ConsumerGroupId, + partition_id: u32, + next_offset: u64, + ) -> Result<()> { + self.offset_store + .reset_offset(topic_id, group_id, partition_id, next_offset) + .map_err(|e| CommonError::Internal(format!("Failed to reset offset: {}", e)))?; + + let cursor_key = GroupPartitionKey::new(topic_id, group_id, partition_id); + self.group_claim_state.insert(cursor_key, ClaimState::new(next_offset)); + + Ok(()) + } + /// Get all committed offsets for a consumer group on a topic. 
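For reference, the `smaps_rollup` fields above are parsed from lines such as `Rss:   123456 kB`. A tiny standalone check of that kB-line parsing; the helper body mirrors the patch (with generics written out) and the sample lines are invented:

```rust
fn parse_smaps_kb_line(line: &str, key: &str) -> Option<u64> {
    let value = line.strip_prefix(key)?.trim();
    let kilobytes = value.strip_suffix("kB")?.trim().parse::<u64>().ok()?;
    Some(kilobytes * 1024)
}

fn main() {
    // Matching key: the kB value is converted to bytes.
    assert_eq!(
        parse_smaps_kb_line("Rss:              123456 kB", "Rss:"),
        Some(123456 * 1024)
    );
    // A non-matching key (e.g. Pss_Anon vs. Pss:) yields None, which is why
    // the caller keeps only the first hit per field and ignores other lines.
    assert_eq!(parse_smaps_kb_line("Pss_Anon:             42 kB", "Pss:"), None);
    println!("smaps kB parsing behaves as expected");
}
```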
pub fn get_group_offsets( &self, diff --git a/backend/crates/kalamdb-store/src/backends/rocksdb/backend.rs b/backend/crates/kalamdb-store/src/backends/rocksdb/backend.rs index b2fc18ac9..827c07f82 100644 --- a/backend/crates/kalamdb-store/src/backends/rocksdb/backend.rs +++ b/backend/crates/kalamdb-store/src/backends/rocksdb/backend.rs @@ -57,11 +57,11 @@ impl RocksDBBackend { sync_writes: bool, disable_wal: bool, settings: RocksDbSettings, + block_cache: Cache, ) -> Self { let mut write_opts = WriteOptions::default(); write_opts.set_sync(sync_writes); write_opts.disable_wal(disable_wal); - let block_cache = Cache::new_lru_cache(settings.block_cache_size); let backend = Self { db, write_opts, @@ -76,7 +76,9 @@ impl RocksDBBackend { /// Creates a new RocksDB backend with the given database handle. pub fn new(db: Arc) -> Self { - Self::new_internal(db, false, false, RocksDbSettings::default()) + let settings = RocksDbSettings::default(); + let block_cache = Cache::new_lru_cache(settings.block_cache_size); + Self::new_internal(db, false, false, settings, block_cache) } /// Creates a new backend with write options and explicit RocksDB tuning settings. @@ -86,7 +88,19 @@ impl RocksDBBackend { disable_wal: bool, settings: RocksDbSettings, ) -> Self { - Self::new_internal(db, sync_writes, disable_wal, settings) + let block_cache = Cache::new_lru_cache(settings.block_cache_size); + Self::new_internal(db, sync_writes, disable_wal, settings, block_cache) + } + + /// Creates a new backend using the same block cache used to open existing column families. + pub(crate) fn with_options_settings_and_cache( + db: Arc, + sync_writes: bool, + disable_wal: bool, + settings: RocksDbSettings, + block_cache: Cache, + ) -> Self { + Self::new_internal(db, sync_writes, disable_wal, settings, block_cache) } /// Set the known physical column family names. diff --git a/backend/crates/kalamdb-store/src/backends/rocksdb/init.rs b/backend/crates/kalamdb-store/src/backends/rocksdb/init.rs index 656327c5e..cb644841c 100644 --- a/backend/crates/kalamdb-store/src/backends/rocksdb/init.rs +++ b/backend/crates/kalamdb-store/src/backends/rocksdb/init.rs @@ -51,6 +51,11 @@ impl RocksDbInit { /// Open and return both the DB and the list of column family names. 
pub fn open_with_cf_names(&self) -> Result<(Arc, Vec)> { + let (db, cf_names, _) = self.open_with_cf_names_and_cache()?; + Ok((db, cf_names)) + } + + pub(crate) fn open_with_cf_names_and_cache(&self) -> Result<(Arc, Vec, Cache)> { let mut db_opts = Options::default(); db_opts.create_if_missing(true); db_opts.create_missing_column_families(true); @@ -60,7 +65,8 @@ impl RocksDbInit { let block_opts = create_block_options_with_cache(&cache); db_opts.set_block_based_table_factory(&block_opts); - self.open_internal(&db_opts, &cache) + let (db, cf_names) = self.open_internal(&db_opts, &cache)?; + Ok((db, cf_names, cache)) } fn open_internal(&self, db_opts: &Options, cache: &Cache) -> Result<(Arc, Vec)> { diff --git a/backend/crates/kalamdb-store/src/backends/rocksdb/mod.rs b/backend/crates/kalamdb-store/src/backends/rocksdb/mod.rs index 842858b8b..be1affe99 100644 --- a/backend/crates/kalamdb-store/src/backends/rocksdb/mod.rs +++ b/backend/crates/kalamdb-store/src/backends/rocksdb/mod.rs @@ -19,12 +19,13 @@ pub fn open_storage_backend( settings: &kalamdb_configs::RocksDbSettings, ) -> anyhow::Result<(Arc, usize)> { let db_init = RocksDbInit::new(db_path.to_string_lossy().into_owned(), settings.clone()); - let (db, cf_names) = db_init.open_with_cf_names()?; - let backend = Arc::new(RocksDBBackend::with_options_and_settings( + let (db, cf_names, block_cache) = db_init.open_with_cf_names_and_cache()?; + let backend = Arc::new(RocksDBBackend::with_options_settings_and_cache( db, settings.sync_writes, settings.disable_wal, settings.clone(), + block_cache, )); backend.set_known_cf_names(cf_names.clone()); Ok((backend, cf_names.len())) diff --git a/backend/crates/kalamdb-system/src/providers/topic_offsets/topic_offsets_provider.rs b/backend/crates/kalamdb-system/src/providers/topic_offsets/topic_offsets_provider.rs index 4cde417d1..0dff793ba 100644 --- a/backend/crates/kalamdb-system/src/providers/topic_offsets/topic_offsets_provider.rs +++ b/backend/crates/kalamdb-system/src/providers/topic_offsets/topic_offsets_provider.rs @@ -153,6 +153,34 @@ impl TopicOffsetsTableProvider { self.store.insert(&key, &row).into_system_error("update offset error") } + /// Reset a consumer group partition so the next fetch starts at `next_offset`. + /// + /// `next_offset = 0` removes the committed offset row because the table stores + /// `last_acked_offset` and there is no valid offset before zero. + pub fn reset_offset( + &self, + topic_id: &TopicId, + group_id: &ConsumerGroupId, + partition_id: u32, + next_offset: u64, + ) -> Result<(), SystemError> { + let key = Self::make_key(topic_id, group_id, partition_id); + + if next_offset == 0 { + return self.store.delete(&key).into_system_error("delete offset error"); + } + + let topic_offset = TopicOffset::new( + topic_id.clone(), + group_id.clone(), + partition_id, + next_offset - 1, + chrono::Utc::now().timestamp_millis(), + ); + let row = Self::encode_offset_row(&topic_offset)?; + self.store.insert(&key, &row).into_system_error("reset offset error") + } + /// Delete all offsets for a consumer group pub fn delete_group_offsets( &self, diff --git a/backend/server.toml b/backend/server.toml index c9f7a4336..4fb320376 100644 --- a/backend/server.toml +++ b/backend/server.toml @@ -318,14 +318,7 @@ strict_ws_origin_check = false # Bind-to-all-interfaces still needs an explicit browser allowlist. # Add your public hostname(s) here for reverse-proxy or production deployments. 
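The RocksDB change above exists so the backend reuses the LRU block cache that the column families were opened with, instead of allocating a second cache. A minimal sketch of that sharing pattern using the `rocksdb` crate; the holder struct and capacity below are illustrative, not the real backend:

```rust
use rocksdb::{BlockBasedOptions, Cache, Options};

// Illustrative holder; the real backend keeps far more state than this.
struct Backend {
    _block_cache: Cache, // same handle the column families were opened with
}

fn build_shared_cache_options(block_cache_size: usize) -> (Options, Cache) {
    // One LRU cache instance...
    let cache = Cache::new_lru_cache(block_cache_size);

    // ...wired into the block-based table factory used when opening the DB...
    let mut block_opts = BlockBasedOptions::default();
    block_opts.set_block_cache(&cache);

    let mut db_opts = Options::default();
    db_opts.create_if_missing(true);
    db_opts.create_missing_column_families(true);
    db_opts.set_block_based_table_factory(&block_opts);

    // ...and returned so the backend can hold the very same handle rather than
    // calling Cache::new_lru_cache a second time.
    (db_opts, cache)
}

fn main() {
    let (_opts, cache) = build_shared_cache_options(64 * 1024 * 1024);
    let _backend = Backend { _block_cache: cache };
}
```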
allowed_origins = [ - "http://localhost:4173", - "http://127.0.0.1:4173", - "http://localhost:5173", - "http://127.0.0.1:5173", - "http://localhost:5174", - "http://127.0.0.1:5174", - "http://localhost:8080", - "http://127.0.0.1:8080", + "*", ] # Allowed HTTP methods (default: common REST methods) diff --git a/backend/tests/integration_tests/topic_pubsub.rs b/backend/tests/integration_tests/topic_pubsub.rs index fa643aeb2..056ae9e0f 100644 --- a/backend/tests/integration_tests/topic_pubsub.rs +++ b/backend/tests/integration_tests/topic_pubsub.rs @@ -301,6 +301,30 @@ async fn ack_topic_offset(server: &TestServer, topic: &str, group: &str, upto_of ); } +async fn reset_topic_group_offset(server: &TestServer, topic: &str, group: &str, next_offset: u64) { + let response = server + .execute_sql(&format!("RESET CONSUMER GROUP '{}' ON {} TO {}", group, topic, next_offset)) + .await; + assert_eq!( + response.status, + ResponseStatus::Success, + "RESET CONSUMER GROUP should succeed for topic='{}' group='{}' next_offset={}: {:?}", + topic, + group, + next_offset, + response.error + ); + + let returned_next_offset = response + .rows_as_maps() + .first() + .and_then(|row| row.get("next_offset")) + .map(parse_i64) + .expect("RESET CONSUMER GROUP should return next_offset") + as u64; + assert_eq!(returned_next_offset, next_offset); +} + async fn assert_topic_offset_state( server: &TestServer, topic: &str, @@ -1195,6 +1219,75 @@ async fn test_sql_group_from_offset_starts_at_requested_offset_and_persists_resu assert_topic_offset_state(&server, &topic, &group, Some(5)).await; } +#[tokio::test] +#[ntest::timeout(45000)] +#[serial] +async fn test_sql_reset_consumer_group_moves_cursor_and_clears_claims() { + let server = TestServer::new_shared().await; + let _cache_guard = TopicPublisherCacheGuard { + app_context: server.app_context.clone(), + }; + + let (topic, source_table) = setup_topic_source_fixture(&server, "tp_sql_reset").await; + let group = format!("reset-{}", consolidated_helpers::unique_table("group")); + + for id in 0..5 { + let insert = server + .execute_sql(&format!( + "INSERT INTO {} (id, payload) VALUES ({}, 'payload_{}')", + source_table, id, id + )) + .await; + assert_eq!(insert.status, ResponseStatus::Success); + } + + let readiness_sql = format!("CONSUME FROM {} FROM EARLIEST LIMIT 10", topic); + let ready = wait_until_sql_consume_row_count_at_least(&server, &readiness_sql, 5).await; + assert_eq!(ready.row_count(), 5, "Expected stateless consume to observe all rows"); + + let first = wait_until_sql_consume_row_count_at_least( + &server, + &format!("CONSUME FROM {} GROUP '{}' FROM EARLIEST LIMIT 2", topic, group), + 2, + ) + .await; + assert_eq!(row_offsets(&first), vec![0, 1]); + + let second = wait_until_sql_consume_row_count_at_least( + &server, + &format!("CONSUME FROM {} GROUP '{}' FROM EARLIEST LIMIT 2", topic, group), + 2, + ) + .await; + assert_eq!(row_offsets(&second), vec![2, 3]); + + reset_topic_group_offset(&server, &topic, &group, 0).await; + assert_topic_offset_state(&server, &topic, &group, None).await; + + let replay = wait_until_sql_consume_row_count_at_least( + &server, + &format!("CONSUME FROM {} GROUP '{}' FROM EARLIEST LIMIT 2", topic, group), + 2, + ) + .await; + assert_eq!( + row_offsets(&replay), + vec![0, 1], + "Reset to 0 should clear pending claims and replay immediately" + ); + + reset_topic_group_offset(&server, &topic, &group, 3).await; + assert_topic_offset_state(&server, &topic, &group, Some(2)).await; + + let from_three = 
wait_until_sql_consume_row_count_at_least( + &server, + &format!("CONSUME FROM {} GROUP '{}' FROM EARLIEST LIMIT 2", topic, group), + 2, + ) + .await; + assert_eq!(row_offsets(&from_three), vec![3, 4]); +} + #[tokio::test] #[ntest::timeout(30000)] async fn test_sql_consume_without_group_is_stateless_and_does_not_persist_offsets() { @@ -1458,6 +1551,101 @@ async fn test_http_api_consume_ack_option_combinations() { ); } +#[tokio::test] +#[ntest::timeout(60000)] +async fn test_http_api_consume_without_group_is_stateless() { + let server = http_server::get_global_server().await; + let namespace = consolidated_helpers::unique_namespace("tp_http_stateless"); + let table = consolidated_helpers::unique_table("events"); + let topic_table = consolidated_helpers::unique_table("topic"); + let topic = format!("{}.{}", namespace, topic_table); + let source_table = format!("{}.{}", namespace, table); + + let auth_header = server.bearer_auth_header("root").expect("Failed to create root auth header"); + + let create_namespace = server + .execute_sql(&format!("CREATE NAMESPACE {}", namespace)) + .await + .expect("CREATE NAMESPACE request failed"); + assert_eq!(create_namespace.status, ResponseStatus::Success); + + let create_table = server + .execute_sql(&format!("CREATE TABLE {} (id INT PRIMARY KEY, payload TEXT)", source_table)) + .await + .expect("CREATE TABLE request failed"); + assert_eq!(create_table.status, ResponseStatus::Success); + + let create_topic = server + .execute_sql(&format!("CREATE TOPIC {} PARTITIONS 1", topic)) + .await + .expect("CREATE TOPIC request failed"); + assert_eq!(create_topic.status, ResponseStatus::Success); + + let add_source = server + .execute_sql(&format!("ALTER TOPIC {} ADD SOURCE {} ON INSERT", topic, source_table)) + .await + .expect("ALTER TOPIC ADD SOURCE request failed"); + assert_eq!(add_source.status, ResponseStatus::Success); + wait_for_topic_routes(server, &topic, 1).await; + + for id in 0..3 { + let insert = server + .execute_sql(&format!( + "INSERT INTO {} (id, payload) VALUES ({}, 'event_{}')", + source_table, id, id + )) + .await + .expect("INSERT request failed"); + assert_eq!(insert.status, ResponseStatus::Success, "INSERT {} failed", id); + } + + let request_body = json!({ + "topic_id": topic, + "start": "earliest", + "limit": 2, + "partition_id": 0, + "timeout_seconds": 1 + }); + + let deadline = tokio::time::Instant::now() + tokio::time::Duration::from_secs(20); + let first = loop { + let (status, payload) = + post_topics_consume(server, &auth_header, request_body.clone()).await; + assert_eq!(status, StatusCode::OK, "Stateless consume failed: {:?}", payload); + let response: HttpConsumeResponse = + serde_json::from_value(payload).expect("Consume payload should deserialize"); + if response.messages.len() == 2 { + break response; + } + if tokio::time::Instant::now() >= deadline { + panic!("Timed out waiting for stateless HTTP consume to return messages"); + } + tokio::time::sleep(tokio::time::Duration::from_millis(120)).await; + }; + + let (second_status, second_payload) = + post_topics_consume(server, &auth_header, request_body).await; + assert_eq!(second_status, StatusCode::OK, "Second stateless consume failed"); + let second: HttpConsumeResponse = + serde_json::from_value(second_payload).expect("Second consume payload should deserialize"); + + let first_offsets: Vec = first.messages.iter().map(|message| message.offset).collect(); + let second_offsets: Vec = second.messages.iter().map(|message| message.offset).collect(); + assert_eq!(first_offsets, 
vec![0, 1]); + assert_eq!(second_offsets, vec![0, 1]); + + let topic_offsets = server + .app_context() + .system_tables() + .topic_offsets() + .get_topic_offsets(&TopicId::new(&topic)) + .expect("Failed to read topic offsets"); + assert!( + topic_offsets.is_empty(), + "Stateless HTTP consume must not create consumer group offsets" + ); +} + /// HTTP API integration: two consumers in the same group should not receive /// overlapping offsets when polling concurrently in batches. #[tokio::test] diff --git a/backend/tests/testserver/files/test_file_permissions_http.rs b/backend/tests/testserver/files/test_file_permissions_http.rs index 7db2836fd..edc8ca569 100644 --- a/backend/tests/testserver/files/test_file_permissions_http.rs +++ b/backend/tests/testserver/files/test_file_permissions_http.rs @@ -391,6 +391,14 @@ async fn test_user_file_access_matrix() -> anyhow::Result<()> { let stored_name_b = file_ref_b.stored_name(); let client = reqwest::Client::new(); + let download_a_owner_scope = format!( + "{}/v1/files/{}/{}/{}/{}", + server.base_url(), + namespace, + table_name, + file_ref_a.sub, + stored_name_a + ); let download_a = format!( "{}/v1/files/{}/{}/{}/{}?user_id={}", server.base_url(), @@ -410,6 +418,16 @@ async fn test_user_file_access_matrix() -> anyhow::Result<()> { userb_id ); + let anonymous_on_a = client.get(&download_a_owner_scope).send().await?; + assert_ne!(anonymous_on_a.status(), reqwest::StatusCode::OK); + + let usera_own_no_query = client + .get(&download_a_owner_scope) + .header("Authorization", usera_auth.clone()) + .send() + .await?; + assert_eq!(usera_own_no_query.status(), reqwest::StatusCode::OK); + let usera_on_b = client .get(&download_b) .header("Authorization", usera_auth.clone()) @@ -424,7 +442,21 @@ async fn test_user_file_access_matrix() -> anyhow::Result<()> { .await?; assert_eq!(userb_on_a.status(), reqwest::StatusCode::FORBIDDEN); - for auth in [&service_auth, &dba_auth, &root_auth] { + let service_on_a = client + .get(&download_a) + .header("Authorization", service_auth.clone()) + .send() + .await?; + assert_eq!(service_on_a.status(), reqwest::StatusCode::FORBIDDEN); + + let service_without_user_id = client + .get(&download_a_owner_scope) + .header("Authorization", service_auth.clone()) + .send() + .await?; + assert_ne!(service_without_user_id.status(), reqwest::StatusCode::OK); + + for auth in [&dba_auth, &root_auth] { let resp_a = client.get(&download_a).header("Authorization", auth.clone()).send().await?; assert_eq!(resp_a.status(), reqwest::StatusCode::OK); diff --git a/benchv2/README.md b/benchv2/README.md index ebd09dfdf..b5786cded 100644 --- a/benchv2/README.md +++ b/benchv2/README.md @@ -108,4 +108,5 @@ Contributions are welcome! Please follow the standard Git workflow: 3. Submit a pull request with a description of your changes. ## License -This project is licensed under the MIT License. See the LICENSE file for details. \ No newline at end of file + +Licensed under the Apache License, Version 2.0 (`Apache-2.0`). See [../LICENSE.txt](../LICENSE.txt) and [../NOTICE](../NOTICE). \ No newline at end of file diff --git a/cli/README.md b/cli/README.md index 3d6d4c562..1b8479e64 100644 --- a/cli/README.md +++ b/cli/README.md @@ -537,7 +537,7 @@ cargo build --release ## License -Same license as KalamDB main project. +Licensed under the Apache License, Version 2.0 (`Apache-2.0`). See [../LICENSE.txt](../LICENSE.txt) and [../NOTICE](../NOTICE). 
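The permission test above pins down the user-table FILE download matrix after this change. A condensed, illustrative restatement of that matrix; the enum and function below are explanatory only and do not exist in the server:

```rust
// Condensed view of the user-table FILE download rules the test asserts; real
// enforcement lives in the server's file handlers and role cache.
enum Actor { Anonymous, Owner, OtherUser, Service, Dba, Root }

/// `cross_user` is true when the request carries a `user_id` query parameter
/// pointing at someone else's scope.
fn download_allowed(actor: Actor, cross_user: bool) -> bool {
    match actor {
        Actor::Anonymous => false,
        // Owners fetch their own scope without any `user_id` query parameter.
        Actor::Owner => !cross_user,
        Actor::OtherUser => false,
        // Service accounts can write user rows via EXECUTE AS USER, but raw
        // cross-user byte downloads (and reads of another user's scope) are refused.
        Actor::Service => false,
        // DBA and root may target another user's scope explicitly.
        Actor::Dba | Actor::Root => true,
    }
}

fn main() {
    assert!(!download_allowed(Actor::Anonymous, false));
    assert!(download_allowed(Actor::Owner, false));
    assert!(!download_allowed(Actor::OtherUser, true));
    assert!(!download_allowed(Actor::Service, true));
    assert!(download_allowed(Actor::Dba, true));
    assert!(download_allowed(Actor::Root, true));
    println!("matrix matches the assertions in the permission test");
}
```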
## Contributing diff --git a/cli/src/session.rs b/cli/src/session.rs index f1e796e5f..9dff80084 100644 --- a/cli/src/session.rs +++ b/cli/src/session.rs @@ -1823,7 +1823,7 @@ impl CLISession { eprintln!("Press Ctrl+C (or 'q') to unsubscribe and return to CLI\n"); } - let mut subscription = self.client.subscribe_with_config(config).await?; + let mut subscription = self.client.live_events_with_config(config).await?; if self.animations { eprintln!("Subscription established (ID: {})", subscription.subscription_id()); @@ -1985,7 +1985,7 @@ impl CLISession { eprintln!(); } - let mut subscription = self.client.subscribe_with_config(config).await?; + let mut subscription = self.client.live_events_with_config(config).await?; if self.animations { eprintln!("Subscription established (ID: {})", subscription.subscription_id()); diff --git a/cli/tests/cluster/cluster_test_subscription_nodes.rs b/cli/tests/cluster/cluster_test_subscription_nodes.rs index 8fa16b5b0..ef9e08021 100644 --- a/cli/tests/cluster/cluster_test_subscription_nodes.rs +++ b/cli/tests/cluster/cluster_test_subscription_nodes.rs @@ -94,7 +94,7 @@ async fn subscribe_with_retry( ) -> SubscriptionManager { let mut last_error: Option = None; for attempt in 0..max_attempts { - let mut subscription = client.subscribe(query).await.expect("Failed to subscribe"); + let mut subscription = client.live_events(query).await.expect("Failed to subscribe"); if let Ok(Some(Ok(event))) = tokio::time::timeout(Duration::from_secs(5), subscription.next()).await @@ -379,7 +379,7 @@ fn cluster_test_subscription_multi_node_identical() { cluster_runtime().block_on(async { // Create subscription on leader only (Spec 021: leader-only reads) let leader_client = create_ws_client(&leader_url); - let mut subscription = leader_client.subscribe(&query).await.expect("Failed to subscribe"); + let mut subscription = leader_client.live_events(&query).await.expect("Failed to subscribe"); let received_count = Arc::new(std::sync::atomic::AtomicUsize::new(0)); // Insert multiple rows on leader @@ -609,7 +609,7 @@ fn cluster_test_subscription_user_table_any_node() { .expect("Failed to build client"); let query = format!("SELECT * FROM {}", full); - match client.subscribe(&query).await { + match client.live_events(&query).await { Ok(_sub) => { println!(" ✓ Node {} accepts user table subscription", idx); }, diff --git a/cli/tests/cluster/cluster_test_ws_follower.rs b/cli/tests/cluster/cluster_test_ws_follower.rs index 010e6d9ec..64682fb87 100644 --- a/cli/tests/cluster/cluster_test_ws_follower.rs +++ b/cli/tests/cluster/cluster_test_ws_follower.rs @@ -76,7 +76,7 @@ async fn subscribe_with_retry( ) -> SubscriptionManager { let mut last_error: Option = None; for attempt in 0..max_attempts { - let mut subscription = client.subscribe(query).await.expect("Failed to subscribe"); + let mut subscription = client.live_events(query).await.expect("Failed to subscribe"); if let Ok(Some(Ok(event))) = tokio::time::timeout(Duration::from_secs(5), subscription.next()).await diff --git a/cli/tests/common/mod.rs b/cli/tests/common/mod.rs index 3b73e15c8..f4276a036 100644 --- a/cli/tests/common/mod.rs +++ b/cli/tests/common/mod.rs @@ -4827,7 +4827,7 @@ impl SubscriptionListener { }; // Start subscription - let mut subscription = match client.subscribe(&query).await { + let mut subscription = match client.live_events(&query).await { Ok(s) => s, Err(e) => { let _ = event_tx.send(format!("ERROR: Failed to subscribe: {}", e)); diff --git a/cli/tests/connection/concurrent_ws_tests.rs 
b/cli/tests/connection/concurrent_ws_tests.rs index fcee42c8f..981ce75f5 100644 --- a/cli/tests/connection/concurrent_ws_tests.rs +++ b/cli/tests/connection/concurrent_ws_tests.rs @@ -99,7 +99,7 @@ fn test_concurrent_websocket_subscriptions() { }; let mut sub = match client - .subscribe_with_config(SubscriptionConfig::without_initial_data( + .live_events_with_config(SubscriptionConfig::without_initial_data( format!("conc_sub_{}", i), &query, )) @@ -278,7 +278,7 @@ fn test_rapid_connect_disconnect() { let subscribe_result = tokio::time::timeout( Duration::from_secs(8), - client.subscribe_with_config(SubscriptionConfig::without_initial_data( + client.live_events_with_config(SubscriptionConfig::without_initial_data( format!("rapid_{}", i), &query, )), diff --git a/cli/tests/smoke/security/smoke_test_security_access.rs b/cli/tests/smoke/security/smoke_test_security_access.rs index a07966789..15ecb3a5d 100644 --- a/cli/tests/smoke/security/smoke_test_security_access.rs +++ b/cli/tests/smoke/security/smoke_test_security_access.rs @@ -52,7 +52,7 @@ fn subscribe_as_user(username: &str, password: &str, query: &str) -> Result<(), .map_err(|e| format!("Failed to build runtime: {}", e))?; let subscribe_result = rt.block_on(async move { - let mut subscription = client.subscribe(query).await?; + let mut subscription = client.live_events(query).await?; // Wait for the first event: ACK means success, Error means permission denied. // The server sends permission errors as WebSocket error events, not as connection diff --git a/cli/tests/smoke/subscription/smoke_test_shared_table_subscription.rs b/cli/tests/smoke/subscription/smoke_test_shared_table_subscription.rs index 6e34f7883..ebedcbb00 100644 --- a/cli/tests/smoke/subscription/smoke_test_shared_table_subscription.rs +++ b/cli/tests/smoke/subscription/smoke_test_shared_table_subscription.rs @@ -37,7 +37,7 @@ fn try_subscribe_as_user(username: &str, password: &str, query: &str) -> Result< .map_err(|e| format!("Failed to build runtime: {}", e))?; let result = rt.block_on(async move { - let mut subscription = client.subscribe(query).await?; + let mut subscription = client.live_events(query).await?; // The server sends permission errors as WebSocket error events, not as connection // failures. We must read at least one event to detect the server's response. 
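The renames above swap `subscribe`/`subscribe_with_config` for `live_events`/`live_events_with_config`, but the tests keep reading the first event before declaring success, because permission errors arrive as WebSocket error events rather than connection failures. A generic sketch of that first-event check; the stream type stands in for the SDK's subscription handle, which exposes a compatible `next()`:

```rust
use std::time::Duration;
use futures::{stream, Stream, StreamExt};

// Generic over the event stream so it does not depend on SDK types; the real
// tests call `client.live_events(query).await?` and read from the returned
// subscription in the same way.
async fn first_event<S, T, E>(mut events: S) -> Option<Result<T, E>>
where
    S: Stream<Item = Result<T, E>> + Unpin,
{
    tokio::time::timeout(Duration::from_secs(5), events.next())
        .await
        .ok()
        .flatten()
}

#[tokio::main]
async fn main() {
    // An Ok first event means the subscription was accepted; an Err first
    // event is how a permission denial shows up.
    let accepted = stream::iter(vec![Ok::<_, String>("ack")]);
    assert!(matches!(first_event(accepted).await, Some(Ok("ack"))));

    let denied = stream::iter(vec![Err::<&str, _>("permission denied".to_string())]);
    assert!(matches!(first_event(denied).await, Some(Err(_))));
}
```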
diff --git a/cli/tests/smoke/subscription/smoke_test_subscription_advanced.rs b/cli/tests/smoke/subscription/smoke_test_subscription_advanced.rs index c13d3d0d6..79a99d80a 100644 --- a/cli/tests/smoke/subscription/smoke_test_subscription_advanced.rs +++ b/cli/tests/smoke/subscription/smoke_test_subscription_advanced.rs @@ -109,7 +109,7 @@ impl SubscriptionListenerAdvanced { if let Some(opts) = options.as_ref() { config.options = Some(opts.clone()); } - match client.subscribe_with_config(config).await { + match client.live_events_with_config(config).await { Ok(s) => { subscription = Some(s); break; diff --git a/cli/tests/smoke/subscription/smoke_test_subscription_close.rs b/cli/tests/smoke/subscription/smoke_test_subscription_close.rs index 2494ef843..7648aa83b 100644 --- a/cli/tests/smoke/subscription/smoke_test_subscription_close.rs +++ b/cli/tests/smoke/subscription/smoke_test_subscription_close.rs @@ -101,7 +101,7 @@ fn smoke_subscription_explicit_close_removes_live_query() { let query_sql = format!("SELECT * FROM {} -- {}", full_clone, marker_clone); let cfg = SubscriptionConfig::new(format!("sub_{}", marker_clone), query_sql); - let mut sub = match client.subscribe_with_config(cfg).await { + let mut sub = match client.live_events_with_config(cfg).await { Ok(s) => s, Err(e) => { eprintln!("subscribe failed: {e}"); @@ -209,7 +209,7 @@ fn smoke_subscription_drop_removes_live_query() { let query_sql = format!("SELECT * FROM {} -- {}", full_clone, marker_clone); let cfg = SubscriptionConfig::new(format!("sub_{}", marker_clone), query_sql); - let mut sub = match client.subscribe_with_config(cfg).await { + let mut sub = match client.live_events_with_config(cfg).await { Ok(s) => s, Err(e) => { eprintln!("subscribe failed: {e}"); diff --git a/cli/tests/smoke/subscription/smoke_test_subscription_listing.rs b/cli/tests/smoke/subscription/smoke_test_subscription_listing.rs index 69f6e714e..dd7f788f0 100644 --- a/cli/tests/smoke/subscription/smoke_test_subscription_listing.rs +++ b/cli/tests/smoke/subscription/smoke_test_subscription_listing.rs @@ -78,7 +78,7 @@ fn smoke_subscription_listing_and_close_removes() { // 2) Subscribe let query_sql = format!("SELECT * FROM {}", full_clone); let cfg = SubscriptionConfig::new("sub_list_1".to_string(), query_sql.clone()); - let mut sub1 = client.subscribe_with_config(cfg).await.expect("subscribe 1"); + let mut sub1 = client.live_events_with_config(cfg).await.expect("subscribe 1"); // Wait for ack let ack_deadline = tokio::time::Instant::now() + Duration::from_secs(5); @@ -108,7 +108,7 @@ fn smoke_subscription_listing_and_close_removes() { // 4) Subscribe to a second query let cfg2 = SubscriptionConfig::new("sub_list_2".to_string(), query_sql.clone()); - let mut sub2 = client.subscribe_with_config(cfg2).await.expect("subscribe 2"); + let mut sub2 = client.live_events_with_config(cfg2).await.expect("subscribe 2"); // Wait for ack let ack_deadline2 = tokio::time::Instant::now() + Duration::from_secs(5); loop { @@ -208,7 +208,7 @@ fn smoke_subscription_listing_tracks_seq_id() { "sub_seq_1".to_string(), format!("SELECT * FROM {}", full_clone), ); - let mut sub = client.subscribe_with_config(cfg).await.expect("subscribe"); + let mut sub = client.live_events_with_config(cfg).await.expect("subscribe"); // Wait for ack let ack_deadline = tokio::time::Instant::now() + Duration::from_secs(5); diff --git a/cli/tests/smoke/subscription/smoke_test_subscription_multi_reconnect.rs b/cli/tests/smoke/subscription/smoke_test_subscription_multi_reconnect.rs index 
c7194ba5a..07a09cf78 100644 --- a/cli/tests/smoke/subscription/smoke_test_subscription_multi_reconnect.rs +++ b/cli/tests/smoke/subscription/smoke_test_subscription_multi_reconnect.rs @@ -132,11 +132,11 @@ fn smoke_subscription_multi_reconnect_parallel() { // Subscribe to BOTH queries (mirrors App.tsx with messages + agent_events). let mut sub_a = client - .subscribe_with_config(SubscriptionConfig::new(&sub_id_a, &query_a)) + .live_events_with_config(SubscriptionConfig::new(&sub_id_a, &query_a)) .await .expect("subscribe A"); let mut sub_b = client - .subscribe_with_config(SubscriptionConfig::new(&sub_id_b, &query_b)) + .live_events_with_config(SubscriptionConfig::new(&sub_id_b, &query_b)) .await .expect("subscribe B"); diff --git a/cli/tests/smoke/subscription/smoke_test_subscription_reconnect_resume.rs b/cli/tests/smoke/subscription/smoke_test_subscription_reconnect_resume.rs index abee3ec72..130a21f2e 100644 --- a/cli/tests/smoke/subscription/smoke_test_subscription_reconnect_resume.rs +++ b/cli/tests/smoke/subscription/smoke_test_subscription_reconnect_resume.rs @@ -146,7 +146,7 @@ fn smoke_subscription_reconnect_basic_resume() { let sub_id = format!("recon_basic_{}", ns); let mut sub = client - .subscribe_with_config(SubscriptionConfig::new(&sub_id, &query)) + .live_events_with_config(SubscriptionConfig::new(&sub_id, &query)) .await .expect("subscribe"); @@ -196,7 +196,7 @@ fn smoke_subscription_reconnect_basic_resume() { let sub_id2 = format!("recon_basic2_{}", ns); let mut sub2 = client - .subscribe_with_config(SubscriptionConfig::new(&sub_id2, &query)) + .live_events_with_config(SubscriptionConfig::new(&sub_id2, &query)) .await .expect("re-subscribe after reconnect"); @@ -281,7 +281,7 @@ fn smoke_subscription_resume_from_seq_id() { let sub_id = format!("recon_seq_{}", ns); let mut sub = client - .subscribe_with_config(SubscriptionConfig::new(&sub_id, &query)) + .live_events_with_config(SubscriptionConfig::new(&sub_id, &query)) .await .expect("subscribe"); @@ -356,7 +356,7 @@ fn smoke_subscription_resume_from_seq_id() { cfg2.options = Some(options); let mut sub2 = - client.subscribe_with_config(cfg2).await.expect("re-subscribe with from_seq_id"); + client.live_events_with_config(cfg2).await.expect("re-subscribe with from_seq_id"); // Gap rows must arrive (as catch-up initial data or change events). 
let resume_events = collect_until(&mut sub2, event_timeout(15), |evs| { diff --git a/cli/tests/smoke/usecases/smoke_test_batch_control.rs b/cli/tests/smoke/usecases/smoke_test_batch_control.rs index ea9a1dc3e..a89c1f24b 100644 --- a/cli/tests/smoke/usecases/smoke_test_batch_control.rs +++ b/cli/tests/smoke/usecases/smoke_test_batch_control.rs @@ -226,7 +226,7 @@ impl BatchSubscriptionListener { config.options = Some(opts); } - let mut subscription = match client.subscribe_with_config(config).await { + let mut subscription = match client.live_events_with_config(config).await { Ok(s) => s, Err(e) => { let _ = event_tx.send(format!("ERROR: Failed to subscribe: {}", e)); diff --git a/cli/tests/subscription/slow_subscriber.rs b/cli/tests/subscription/slow_subscriber.rs index 61887ea39..16ca03533 100644 --- a/cli/tests/subscription/slow_subscriber.rs +++ b/cli/tests/subscription/slow_subscriber.rs @@ -162,7 +162,7 @@ fn subscription_slow_consumer_initial_data() { let (events, hit_error) = rt.block_on(async { // Use a generous initial_data_timeout so slow processing doesn't kill us let client = slow_client(180, 180).expect("client"); - let mut sub = client.subscribe(&query).await.expect("subscribe"); + let mut sub = client.live_events(&query).await.expect("subscribe"); drain_with_delay( &mut sub, (total_rows + 5) as usize, @@ -242,7 +242,7 @@ fn subscription_3g_like_high_latency() { // Single runtime handles both phases: initial data and live change events let (initial_ok, change_found) = rt.block_on(async { let client = slow_client(60, 60).expect("client"); - let mut sub = client.subscribe(&query).await.expect("subscribe"); + let mut sub = client.live_events(&query).await.expect("subscribe"); // Wait 300ms (simulated RTT) before starting to read tokio::time::sleep(Duration::from_millis(300)).await; @@ -318,7 +318,7 @@ fn subscription_slow_consumer_concurrent_writes() { let (insert_count, error_count) = rt.block_on(async { let client = slow_client(180, 180).expect("client"); - let mut sub = client.subscribe(&query).await.expect("subscribe"); + let mut sub = client.live_events(&query).await.expect("subscribe"); // Drain initial empty Ack let (_, _) = @@ -405,7 +405,7 @@ fn subscription_reconnect_after_drop() { let (first_count, second_count) = rt.block_on(async { // ── First connection: read a couple events then drop abruptly ── let client1 = slow_client(60, 60).expect("client1"); - let mut sub1 = client1.subscribe(&query).await.expect("subscribe1"); + let mut sub1 = client1.live_events(&query).await.expect("subscribe1"); let (evs1, _) = drain_with_delay(&mut sub1, 3, Duration::from_millis(0), Duration::from_secs(10)).await; let c1 = evs1.len(); @@ -418,7 +418,7 @@ fn subscription_reconnect_after_drop() { // ── Second connection: full subscription, verify snapshot ── let client2 = slow_client(60, 60).expect("client2"); - let mut sub2 = client2.subscribe(&query).await.expect("subscribe2"); + let mut sub2 = client2.live_events(&query).await.expect("subscribe2"); let (evs2, hit_err2) = drain_with_delay(&mut sub2, 15, Duration::from_millis(0), Duration::from_secs(30)) .await; @@ -501,7 +501,7 @@ fn subscription_timeout_graceful_then_reconnect() { .expect("tight client"); // This might succeed quickly or fail — either is acceptable - match client_tight.subscribe(&query).await { + match client_tight.live_events(&query).await { Ok(mut sub) => { let (evs, _) = drain_with_delay( &mut sub, @@ -526,7 +526,7 @@ fn subscription_timeout_graceful_then_reconnect() { // ── Phase 2: normal timeouts – must succeed 
── let client_normal = slow_client(30, 30).expect("normal client"); - let mut sub_normal = client_normal.subscribe(&query).await.expect("normal subscribe"); + let mut sub_normal = client_normal.live_events(&query).await.expect("normal subscribe"); let (evs_normal, hit_err) = drain_with_delay( &mut sub_normal, 40, @@ -630,7 +630,7 @@ fn subscription_multiple_concurrent_slow_subscribers() { }, }; - let mut sub = match client.subscribe(&q).await { + let mut sub = match client.live_events(&q).await { Ok(s) => s, Err(e) => { eprintln!("[SUB {}] subscribe error: {}", idx, e); @@ -757,7 +757,7 @@ fn subscription_large_initial_data_slow_batch_consumer() { ); let mut config = SubscriptionConfig::new(&sub_id, &query); config.options = Some(SubscriptionOptions::default().with_batch_size(8)); - let mut sub = client.subscribe_with_config(config).await.expect("subscribe"); + let mut sub = client.live_events_with_config(config).await.expect("subscribe"); // 50ms per event – slow 3G batch consumer drain_with_delay( @@ -833,7 +833,7 @@ fn subscription_repeated_reconnect_loop() { println!("[TEST] reconnect loop round {}/{}", round, RECONNECT_ROUNDS); let client = slow_client(30, 30).expect("client"); - let mut sub = client.subscribe(&query).await.expect("subscribe"); + let mut sub = client.live_events(&query).await.expect("subscribe"); // Read just a few events then drop abruptly (simulates connection reset) let (evs, hit_error) = @@ -900,7 +900,7 @@ fn subscription_stable_after_idle_pause() { let found = rt.block_on(async { let client = slow_client(30, 30).expect("client"); - let mut sub = client.subscribe(&query).await.expect("subscribe"); + let mut sub = client.live_events(&query).await.expect("subscribe"); // Drain initial snapshot let (_, _) = @@ -968,7 +968,7 @@ fn subscription_burst_then_slow_catchup() { let (received_burst, hit_error) = rt.block_on(async { let client = slow_client(180, 180).expect("client"); - let mut sub = client.subscribe(&query).await.expect("subscribe"); + let mut sub = client.live_events(&query).await.expect("subscribe"); // Drain empty initial Ack let (_, _) = diff --git a/docker/build/Dockerfile b/docker/build/Dockerfile index cfc40a053..4ff9c723f 100644 --- a/docker/build/Dockerfile +++ b/docker/build/Dockerfile @@ -259,11 +259,15 @@ RUN sed -i 's|data_path = "\./data"|data_path = "/data"|g' /config/server.toml # Switch to non-root user USER kalamdb -# mimalloc tuning: eager memory return and reduced arena count. -# MIMALLOC_PURGE_DELAY=100: purge freed segments after 100ms (default 10s) -# MIMALLOC_ARENA_EAGER_COMMIT=1: commit arena pages immediately -ENV MIMALLOC_PURGE_DELAY=100 -ENV MIMALLOC_ARENA_EAGER_COMMIT=1 +# mimalloc tuning for low idle RSS on Linux containers. +# The default image favors minimal idle footprint; production deployments can +# override worker/thread counts or allocator knobs through environment variables. +ENV MIMALLOC_PURGE_DELAY=0 +ENV MIMALLOC_PURGE_DECOMMITS=1 +ENV MIMALLOC_EAGER_COMMIT=0 +ENV MIMALLOC_ARENA_EAGER_COMMIT=0 +ENV MIMALLOC_ABANDONED_PAGE_PURGE=1 +ENV MIMALLOC_ALLOW_THP=0 # Set working directory WORKDIR /data diff --git a/docker/build/Dockerfile.prebuilt b/docker/build/Dockerfile.prebuilt index 995b2b891..de4865b79 100644 --- a/docker/build/Dockerfile.prebuilt +++ b/docker/build/Dockerfile.prebuilt @@ -74,6 +74,17 @@ RUN mkdir -p /data/.kalam && \ chown -R 65532:65532 /data /config ENV HOME=/data + +# mimalloc tuning for low idle RSS in the published container image. 
+# Deployments that want higher concurrency can still override server config or +# allocator knobs explicitly at runtime. +ENV MIMALLOC_PURGE_DELAY=0 +ENV MIMALLOC_PURGE_DECOMMITS=1 +ENV MIMALLOC_EAGER_COMMIT=0 +ENV MIMALLOC_ARENA_EAGER_COMMIT=0 +ENV MIMALLOC_ABANDONED_PAGE_PURGE=1 +ENV MIMALLOC_ALLOW_THP=0 + USER 65532:65532 WORKDIR /data diff --git a/docs/api/api-reference.md b/docs/api/api-reference.md index 2f3048215..5ce08ed15 100644 --- a/docs/api/api-reference.md +++ b/docs/api/api-reference.md @@ -16,7 +16,7 @@ This reference is aligned with the current route + handler implementations. ### SQL and files - `POST /v1/api/sql` -- `GET /v1/files/{namespace}/{table_name}/{subfolder}/{file_id}` +- `GET /v1/files/{namespace}/{table_name}/{subfolder}/{stored_name}` ### WebSocket @@ -212,7 +212,7 @@ Current `error.code` values include: ## 4) File Download API -## `GET /v1/files/{namespace}/{table_name}/{subfolder}/{file_id}` +## `GET /v1/files/{namespace}/{table_name}/{subfolder}/{stored_name}` Download previously stored file bytes. @@ -224,17 +224,17 @@ Download previously stored file bytes. - `user_id` (optional) - Only meaningful for **user tables** - - Cross-user downloads are allowed only when the authenticated actor is authorized for the target ID's cached role class + - Cross-user raw-byte downloads are limited to `dba` and `system` actors that are authorized for the target ID's cached role class ### Behavior by table type -- `User` table: downloads from the authenticated user scope by default, or from an authorized target user scope when `user_id` is allowed by the role matrix. Service, DBA, and system target IDs are checked from the in-memory privileged-user role cache; other target IDs are treated as regular users. +- `User` table: downloads from the authenticated user scope by default. `dba` and `system` actors may supply `user_id` for an authorized target user scope. `service` actors can write user-scoped rows through `EXECUTE AS USER`, but cannot directly download another user's FILE bytes with `user_id`. - `Shared` table: allowed only if shared access policy permits; `user_id` query is rejected - `Stream`/`System` table: rejected (`file storage not supported`) ### Validation -- `subfolder` and `file_id` are path-validated (no traversal patterns) +- `subfolder` and `stored_name` are path-validated (no traversal patterns) ### Responses @@ -444,6 +444,8 @@ Response: Notes: - `payload` is base64-encoded bytes +- omit `group_id` for stateless inspection; stateless reads honor `start` on every request and do not create group offsets +- include `group_id` for durable group consumption; existing committed offsets resume before `start` is considered - `timeout_seconds` is accepted in request but is not currently used by handler logic ### `POST /v1/api/topics/ack` diff --git a/docs/architecture/topic-consumption.md b/docs/architecture/topic-consumption.md index 177e758a5..2a185dbe5 100644 --- a/docs/architecture/topic-consumption.md +++ b/docs/architecture/topic-consumption.md @@ -56,6 +56,17 @@ If a consumer claims messages and does not ack before `topics.visibility_timeout_secs`, the next fetch expires that stale claim and resets the group cursor to the earliest expired offset for redelivery. +`RESET CONSUMER GROUP` is the explicit administrative path for moving a group +cursor backward or forward. 
It force-sets the next offset for one +`(topic_id, group_id, partition_id)`, clears any in-memory pending claims for +that key, and updates `system.topic_offsets` when the requested next offset is +greater than zero. Resetting to `0` removes the committed offset row because the +table stores `last_acked_offset` and there is no valid offset before zero. + +HTTP consume can omit `group_id` for stateless inspection reads. Stateless reads +use the requested `start` position on every request and do not create group +claims or committed offset rows. + The visibility timeout can be configured in `server.toml`: ```toml diff --git a/docs/reference/sql.md b/docs/reference/sql.md index 66feffdc4..c71931956 100644 --- a/docs/reference/sql.md +++ b/docs/reference/sql.md @@ -434,6 +434,26 @@ GROUP '' UPTO OFFSET ; ``` +### RESET CONSUMER GROUP + +```sql +RESET CONSUMER GROUP '' +ON +[PARTITION ] +TO ; +``` + +Examples: + +```sql +RESET CONSUMER GROUP 'worker-1' ON app.new_messages TO 0; +RESET CONSUMER GROUP 'worker-1' ON app.new_messages PARTITION 0 TO 250; +``` + +`RESET CONSUMER GROUP` is admin-only and moves one consumer-group partition to +the next offset you specify. It also clears pending in-memory claims for that +group partition so the reset takes effect immediately. + ## Cluster Commands ```sql diff --git a/docs/sdk/sdk.md b/docs/sdk/sdk.md index 8bcf3a6f2..2d156375b 100644 --- a/docs/sdk/sdk.md +++ b/docs/sdk/sdk.md @@ -1,8 +1,8 @@ # KalamDB TypeScript/JavaScript SDK -The official TypeScript/JavaScript SDK for KalamDB, built on top of a Rust → WASM core. +The official TypeScript/JavaScript SDK for KalamDB, built on top of a Rust -> WASM core. -Worker and topic-consumer APIs now live in the separate `@kalamdb/consumer` package. This page focuses on the app-facing `@kalamdb/client` surface. +Worker and topic-consumer APIs now live in the separate `@kalamdb/consumer` package. React live-query UI APIs live in `@kalamdb/react`, which wraps the app-facing `@kalamdb/client` surface. - **Tiny bundle size** with minimal dependencies - **Cross-platform**: Works in Node.js and browsers @@ -19,6 +19,13 @@ yarn add @kalamdb/client pnpm add @kalamdb/client ``` +For React live-query components and hooks: + +```bash +npm install @kalamdb/client @kalamdb/react react react-dom +npm install @kalamdb/orm drizzle-orm +``` + ## Building From Source (This Repo) This repo contains two related pieces: @@ -97,6 +104,58 @@ await unsubscribe(); await client.disconnect(); ``` +## React Live Queries + +`@kalamdb/react` provides `KalamProvider`, `LiveQuery`, `LiveQueries`, `useLiveQuery`, `useLiveQueries`, and `useLiveSelection`. It supports raw SQL mode and typed Drizzle mode through `@kalamdb/orm`. 
+ +```tsx +import { KalamProvider, LiveQueries, useLiveSelection } from '@kalamdb/react'; +import { asc, eq } from 'drizzle-orm'; +import { createClient, Auth } from '@kalamdb/client'; +import { approvals, messages, toolCalls, typing } from './schema.generated'; + +const client = createClient({ + url: 'http://localhost:8080', + authProvider: async () => Auth.basic('admin', 'AdminPass123!'), +}); + +export function AssistantScreen({ conversationId }: { conversationId: string }) { + return ( + + eq(table.conversationId, conversationId), + orderBy: (table) => asc(table.createdAt), + deps: [conversationId], + }, + typing: { table: typing, where: (table) => eq(table.conversationId, conversationId), deps: [conversationId] }, + toolCalls: { table: toolCalls, where: (table) => eq(table.conversationId, conversationId), deps: [conversationId] }, + approvals: { table: approvals, where: (table) => eq(table.conversationId, conversationId), deps: [conversationId] }, + }} + > + {(live) => } + + + ); +} + +function AssistantBody({ live }) { + const assistant = useLiveSelection(live, (context) => ({ + messages: context.messages.rows, + typingUsers: context.typing.rows.map((row) => row.userName), + activeTools: context.toolCalls.rows.filter((row) => row.status !== 'completed'), + pendingApprovals: context.approvals.rows.filter((row) => row.status === 'pending'), + approve: (approvalId: string) => context.update(approvals, approvalId).set({ status: 'approved' }), + })); + + return ; +} +``` + +The repo includes [../../examples/react-ai-chat](../../examples/react-ai-chat), a runnable React validation app with conversation sidebar, history loading, multi-file messages, typing, tool activity, streamed replies, edit/cancel actions, and human approvals. + ## API Reference ### Creating a Client diff --git a/examples/chat-with-ai/README.md b/examples/chat-with-ai/README.md index 3a623ccb0..2f7e4c2b2 100644 --- a/examples/chat-with-ai/README.md +++ b/examples/chat-with-ai/README.md @@ -154,4 +154,4 @@ If port 5173 is busy, Vite prints the next available local URL. Use the URL from ## License -Part of the KalamDB project. See the repository root for license details. +Licensed under the Apache License, Version 2.0 (`Apache-2.0`). See [../../LICENSE.txt](../../LICENSE.txt) and [../../NOTICE](../../NOTICE). diff --git a/examples/chat-with-ai/scripts/ensure-sdk.sh b/examples/chat-with-ai/scripts/ensure-sdk.sh index 53a081c06..eaf48ba8e 100755 --- a/examples/chat-with-ai/scripts/ensure-sdk.sh +++ b/examples/chat-with-ai/scripts/ensure-sdk.sh @@ -8,22 +8,32 @@ TYPESCRIPT_SDK_DIR="$(cd "$PROJECT_DIR/../../link/sdks/typescript" && pwd)" CLIENT_DIR="$TYPESCRIPT_SDK_DIR/client" CONSUMER_DIR="$TYPESCRIPT_SDK_DIR/consumer" ORM_DIR="$TYPESCRIPT_SDK_DIR/orm" +CLIENT_CORE_DIR="$(cd "$PROJECT_DIR/../../link/kalam-client" && pwd)" CLIENT_ENTRY="$CLIENT_DIR/dist/src/index.js" CLIENT_WASM="$CLIENT_DIR/dist/wasm/kalam_client_bg.wasm" CONSUMER_ENTRY="$CONSUMER_DIR/dist/src/index.js" CONSUMER_WASM="$CONSUMER_DIR/dist/wasm/kalam_consumer_bg.wasm" ORM_ENTRY="$ORM_DIR/dist/index.js" +client_needs_build=false orm_needs_build=false +if [ ! -f "$CLIENT_ENTRY" ] || [ ! 
-f "$CLIENT_WASM" ]; then + client_needs_build=true +elif find "$CLIENT_DIR/src" "$CLIENT_CORE_DIR/src" -type f \( -name '*.ts' -o -name '*.rs' \) -newer "$CLIENT_ENTRY" | grep -q .; then + client_needs_build=true +elif find "$CLIENT_DIR/src" "$CLIENT_CORE_DIR/src" -type f \( -name '*.ts' -o -name '*.rs' \) -newer "$CLIENT_WASM" | grep -q .; then + client_needs_build=true +fi + if [ ! -f "$ORM_ENTRY" ]; then orm_needs_build=true elif find "$ORM_DIR/src" -type f -newer "$ORM_ENTRY" | grep -q .; then orm_needs_build=true fi -if [ ! -f "$CLIENT_ENTRY" ] || [ ! -f "$CLIENT_WASM" ]; then - echo "@kalamdb/client is not compiled. Building now..." +if [ "$client_needs_build" = true ]; then + echo "@kalamdb/client needs a rebuild. Building now..." echo "" cd "$CLIENT_DIR" bash build.sh diff --git a/examples/chat-with-ai/src/App.tsx b/examples/chat-with-ai/src/App.tsx index 3268bcc2f..b565cf18a 100644 --- a/examples/chat-with-ai/src/App.tsx +++ b/examples/chat-with-ai/src/App.tsx @@ -4,14 +4,14 @@ import { Auth, ChangeType, MessageType, + type RowData, createClient, type SubscriptionErrorEvent, } from '@kalamdb/client'; import { eq } from 'drizzle-orm'; -import { kalamDriver, liveTable, subscribeTable, type TableSubscriptionEvent } from '@kalamdb/orm'; +import { kalamDriver, liveTable } from '@kalamdb/orm'; import { drizzle } from 'drizzle-orm/pg-proxy'; import { - chat_demo_agent_events as agentEvents, chat_demo_agent_eventsConfig as agentEventsConfig, chat_demo_messages as chatMessages, type ChatDemoAgentEvents as AgentEventRow, @@ -37,6 +37,11 @@ const timeFormatter = new Intl.DateTimeFormat(undefined, { second: '2-digit', }); +function resolveBrowserWasmUrl(): string { + const base = '/wasm/kalam_client_bg.wasm'; + return import.meta.env.DEV ? `${base}?t=${Date.now()}` : base; +} + function createAuthedClient() { return createClient({ url: import.meta.env.VITE_KALAMDB_URL ?? 'http://127.0.0.1:8080', @@ -45,6 +50,7 @@ function createAuthedClient() { import.meta.env.VITE_KALAMDB_PASSWORD ?? 'kalamdb123', ), disableCompression: true, + wasmUrl: resolveBrowserWasmUrl(), }); } @@ -145,6 +151,28 @@ function deriveFallbackDraft(messages: ChatMessageRow[]): LiveDraft | null { return null; } +function sqlLiteral(value: string): string { + return `'${value.replace(/'/g, "''")}'`; +} + +function mapAgentEventRow(row: RowData): AgentEventRow { + return { + _seq: row._seq?.asSeqId()?.toString() ?? row._seq?.asString() ?? null, + id: row.id?.asString() ?? '', + response_id: row.response_id?.asString() ?? '', + room: row.room?.asString() ?? '', + sender_username: row.sender_username?.asString() ?? '', + stage: row.stage?.asString() ?? '', + preview: row.preview?.asString() ?? '', + message: row.message?.asString() ?? '', + created_at: row.created_at?.asDate() ?? new Date(0), + }; +} + +function mapAgentEventRows(rows: RowData[] | undefined): AgentEventRow[] { + return (rows ?? []).map(mapAgentEventRow); +} + export function App() { const [messages, setMessages] = useState([]); const [events, setEvents] = useState([]); @@ -167,7 +195,7 @@ export function App() { }); }; - const handleEventSubscription = (event: TableSubscriptionEvent): void => { + const handleEventSubscription = (event: { type: string; rows?: RowData[]; old_values?: RowData[]; code?: string; message?: string; change_type?: string }): void => { if (!active) { return; } @@ -183,7 +211,7 @@ export function App() { } if (event.type === MessageType.InitialDataBatch) { - publishEvents(upsertEvents(bufferedEvents, event.rows ?? 
[])); + publishEvents(upsertEvents(bufferedEvents, mapAgentEventRows(event.rows))); return; } @@ -192,16 +220,16 @@ export function App() { } if (event.change_type === ChangeType.Delete) { - publishEvents(removeEvents(bufferedEvents, event.old_values ?? [])); + publishEvents(removeEvents(bufferedEvents, mapAgentEventRows(event.old_values))); return; } let nextEvents = bufferedEvents; if (event.change_type === ChangeType.Update) { - nextEvents = removeEvents(nextEvents, event.old_values ?? []); + nextEvents = removeEvents(nextEvents, mapAgentEventRows(event.old_values)); } - publishEvents(upsertEvents(nextEvents, event.rows ?? [])); + publishEvents(upsertEvents(nextEvents, mapAgentEventRows(event.rows))); }; const start = async (): Promise => { @@ -217,11 +245,11 @@ export function App() { }, { where: eq(chatMessages.room, ROOM), - // `last_rows` asks the server for a rewind window at subscribe time. + // `lastRows` asks the server for a rewind window at subscribe time. // `limit` keeps the materialized client-side live state bounded // after that rewind and across later live changes. limit: MAX_CHAT_MESSAGES, - subscriptionOptions: { last_rows: MAX_CHAT_MESSAGES }, + lastRows: MAX_CHAT_MESSAGES, onError: (event: SubscriptionErrorEvent) => { if (!active) { return; @@ -235,13 +263,11 @@ export function App() { // The draft rail keeps raw protocol frames so rapid typing bursts can // be reconciled locally instead of waiting for a full live-row view. - const eventsUnsubscribe = await subscribeTable( - client, - agentEvents, + const eventsUnsubscribe = await client.liveEvents( + `SELECT * FROM chat_demo.agent_events WHERE room = ${sqlLiteral(ROOM)}`, handleEventSubscription, { - where: eq(agentEvents.room, ROOM), - last_rows: MAX_AGENT_EVENTS, + lastRows: MAX_AGENT_EVENTS, }, ); unsubscribers.push(eventsUnsubscribe); diff --git a/examples/chat-with-ai/tests/chat.spec.mjs b/examples/chat-with-ai/tests/chat.spec.mjs index df47de40d..db882eeb5 100644 --- a/examples/chat-with-ai/tests/chat.spec.mjs +++ b/examples/chat-with-ai/tests/chat.spec.mjs @@ -15,6 +15,14 @@ const adminPassword = 'kalamdb123'; let agentProcess; const testGroup = `chat-demo-test-${Date.now()}`; +function appendNodeOption(existing, flag) { + if (!existing) { + return flag; + } + + return existing.split(/\s+/).includes(flag) ? 
existing : `${existing} ${flag}`; +} + async function login(user, password) { const response = await fetch(`${serverUrl}/v1/api/auth/login`, { method: 'POST', @@ -334,6 +342,7 @@ test.beforeAll(async () => { ...process.env, KALAMDB_GROUP: testGroup, KALAMDB_START: 'latest', + NODE_OPTIONS: appendNodeOption(process.env.NODE_OPTIONS, '--preserve-symlinks'), }, }); @@ -584,10 +593,10 @@ test('subscription can disconnect and resume from the saved checkpoint without r let resumedUnsubscribe; try { - initialUnsubscribe = await client.subscribeWithSql( + initialUnsubscribe = await client.liveEvents( sql, (event) => preEvents.push(event), - { last_rows: 0 }, + { lastRows: 0 }, ); await waitFor( @@ -614,10 +623,10 @@ test('subscription can disconnect and resume from the saved checkpoint without r await insertAssistantMessages(resumeRoom, [gapContent]); - resumedUnsubscribe = await client.subscribeWithSql( + resumedUnsubscribe = await client.liveEvents( sql, (event) => resumedEvents.push(event), - { from: checkpoint, last_rows: 0 }, + { from: checkpoint, lastRows: 0 }, ); expect(client.isConnected()).toBe(true); diff --git a/examples/react-ai-chat/.gitignore b/examples/react-ai-chat/.gitignore new file mode 100644 index 000000000..b3d832416 --- /dev/null +++ b/examples/react-ai-chat/.gitignore @@ -0,0 +1,5 @@ +node_modules/ +dist/ +.env +.env.local +*.log \ No newline at end of file diff --git a/examples/react-ai-chat/README.md b/examples/react-ai-chat/README.md new file mode 100644 index 000000000..a6f1e1f6d --- /dev/null +++ b/examples/react-ai-chat/README.md @@ -0,0 +1,48 @@ +# React AI Chat Example + +This example is a chat-application validation surface for `@kalamdb/react`. It keeps the browser code in [src/app](src/app) and the topic worker in [src/agent](src/agent), then demonstrates conversations, websocket-confirmed sends, streamed typing tokens, final assistant inserts, file attachments, and approval actions with small, readable components. + +The example setup is intentionally simple: one SQL file and one setup script. The setup script uses the installed `kalam` CLI, clears the two example topics if they already exist, imports [chat-app.sql](chat-app.sql), then writes `.env.local` for the browser and agent. + +## Quick Start + +```bash +npm install +npm run setup +npm run agent +npm run dev +``` + +Open the Vite URL, usually `http://127.0.0.1:5176`. + +`npm run setup` clears the example topics if they already exist, runs `kalam --file chat-app.sql`, creates the tables and topics, seeds the default conversation, and writes `.env.local` with `VITE_KALAMDB_DEMO_MODE=false`. + +If you need non-default credentials, set `KALAMDB_URL`, `KALAMDB_USER`, and `KALAMDB_PASSWORD` before running the script. + +Send a message like: + +```text +customer refund needs approval +``` + +The agent will create approval rows, stream typing tokens, and continue after approval actions. + +If you want browser-only demo mode instead of the server-backed flow, skip `npm run setup` and set `VITE_KALAMDB_DEMO_MODE=true` in `.env.local`. + +## Files Worth Reading + +- [src/app/App.tsx](src/app/App.tsx): `KalamProvider` and `LiveQueries` orchestration. +- [src/app/components/Aside.tsx](src/app/components/Aside.tsx): conversation creation and selection. +- [src/app/components/Conversation.tsx](src/app/components/Conversation.tsx): sticky header, scrollable messages, sticky composer. +- [src/app/components/ChatComposer.tsx](src/app/components/ChatComposer.tsx): attach-file and enter-to-send composer. 
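All of the UI pieces above sit on the same live-query pattern. The sketch below is illustrative only: it uses the `@kalamdb/client` `liveEvents` surface that appears elsewhere in this diff rather than the `@kalamdb/react` hooks, and `LiveEventsClient`/`watchTypingTokens` are local stand-ins, not SDK exports.

```ts
import { ChangeType, type RowData } from '@kalamdb/client';

// Structural stand-in for the client; only the single call used below is typed.
type LiveEventsClient = {
  liveEvents(
    sql: string,
    onEvent: (event: { type: string; change_type?: string; rows?: RowData[] }) => void,
    options?: { lastRows?: number },
  ): Promise<() => void>;
};

// Append streamed typing tokens for one conversation as they arrive.
async function watchTypingTokens(
  client: LiveEventsClient,
  conversationId: string,
  onToken: (token: string) => void,
): Promise<() => void> {
  // Double embedded single quotes so the id is safe inside a SQL string literal.
  const literal = `'${conversationId.replace(/'/g, "''")}'`;
  return client.liveEvents(
    `SELECT * FROM react_ai_chat.typing_tokens WHERE conversation_id = ${literal}`,
    (event) => {
      if (event.change_type === ChangeType.Delete) {
        return; // TTL expiry on the STREAM table; nothing to append
      }
      // Both the initial batch and later inserts carry rows to append.
      for (const row of event.rows ?? []) {
        onToken(row.token?.asString() ?? '');
      }
    },
    { lastRows: 0 }, // live tokens only; no rewind window
  );
}
```

The real components in `src/app` wrap the equivalent subscriptions behind `KalamProvider` and `LiveQueries` from `@kalamdb/react`.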
+- [src/agent/index.ts](src/agent/index.ts): deterministic topic-agent worker. +- [chat-app.sql](chat-app.sql): full namespace reset, table/topic creation, and seed data. + +## Validation + +```bash +npm run build +npm test +``` + +The package scripts build the local TypeScript SDKs first if their `dist` folders are missing. \ No newline at end of file diff --git a/examples/react-ai-chat/chat-app.sql b/examples/react-ai-chat/chat-app.sql new file mode 100644 index 000000000..c17be402c --- /dev/null +++ b/examples/react-ai-chat/chat-app.sql @@ -0,0 +1,72 @@ +CREATE NAMESPACE IF NOT EXISTS react_ai_chat; + +DROP TABLE IF EXISTS react_ai_chat.approval_actions; +DROP TABLE IF EXISTS react_ai_chat.typing_tokens; +DROP TABLE IF EXISTS react_ai_chat.approvals; +DROP TABLE IF EXISTS react_ai_chat.messages; +DROP TABLE IF EXISTS react_ai_chat.conversations; + +CREATE TABLE IF NOT EXISTS react_ai_chat.conversations ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL, + summary TEXT NOT NULL DEFAULT '', + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +) WITH (TYPE = 'USER'); + +CREATE TABLE IF NOT EXISTS react_ai_chat.messages ( + id BIGINT PRIMARY KEY DEFAULT SNOWFLAKE_ID(), + client_id TEXT, + conversation_id TEXT NOT NULL, + reply_to_message_id TEXT, + role TEXT NOT NULL, + body TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'sent', + attachment FILE, + approval_id TEXT, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +) WITH (TYPE = 'USER'); + +CREATE TABLE IF NOT EXISTS react_ai_chat.approvals ( + id TEXT PRIMARY KEY, + conversation_id TEXT NOT NULL, + message_id TEXT NOT NULL, + title TEXT NOT NULL, + body TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +) WITH (TYPE = 'USER'); + +CREATE TABLE IF NOT EXISTS react_ai_chat.typing_tokens ( + id BIGINT PRIMARY KEY DEFAULT SNOWFLAKE_ID(), + conversation_id TEXT NOT NULL, + message_id TEXT NOT NULL, + status TEXT NOT NULL, + token TEXT NOT NULL DEFAULT '', + created_at TIMESTAMP NOT NULL DEFAULT NOW() +) WITH (TYPE = 'STREAM', TTL_SECONDS = 120); + +CREATE TABLE IF NOT EXISTS react_ai_chat.approval_actions ( + id BIGINT PRIMARY KEY DEFAULT SNOWFLAKE_ID(), + approval_id TEXT NOT NULL, + conversation_id TEXT NOT NULL, + action TEXT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW() +) WITH (TYPE = 'USER'); + +CREATE TOPIC react_ai_chat.agent_messages; +ALTER TOPIC react_ai_chat.agent_messages ADD SOURCE react_ai_chat.messages ON INSERT; + +CREATE TOPIC react_ai_chat.agent_actions; +ALTER TOPIC react_ai_chat.agent_actions ADD SOURCE react_ai_chat.approval_actions ON INSERT; + +INSERT INTO react_ai_chat.conversations (id, title, summary) +VALUES ('project-alpha', 'Project Alpha', 'Data analysis with approval-gated database migration'); + +INSERT INTO react_ai_chat.approvals (id, conversation_id, message_id, title, body, status) +VALUES ('approval-project-alpha', 'project-alpha', '1001', 'Action Required', 'Run database migration for Project Alpha?', 'pending'); + +INSERT INTO react_ai_chat.messages (id, client_id, conversation_id, role, body, status, approval_id) +VALUES (1001, NULL, 'project-alpha', 'assistant', 'I analyzed the historical datasets and generated the quarterly summary chart. 
The European market outliers are isolated and ready for review.', 'complete', 'approval-project-alpha'); \ No newline at end of file diff --git a/examples/react-ai-chat/index.html new file mode 100644 index 000000000..dd989b306 --- /dev/null +++ b/examples/react-ai-chat/index.html @@ -0,0 +1,12 @@ [12 added lines of HTML; the markup was not preserved here, and the only recoverable content is the page title "KalamDB React AI Chat"]
+ + + \ No newline at end of file diff --git a/examples/react-ai-chat/package-lock.json b/examples/react-ai-chat/package-lock.json new file mode 100644 index 000000000..4f3fa8958 --- /dev/null +++ b/examples/react-ai-chat/package-lock.json @@ -0,0 +1,1852 @@ +{ + "name": "kalamdb-react-ai-chat", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "kalamdb-react-ai-chat", + "version": "1.0.0", + "dependencies": { + "@kalamdb/client": "file:../../link/sdks/typescript/client", + "@kalamdb/consumer": "file:../../link/sdks/typescript/consumer", + "@kalamdb/orm": "file:../../link/sdks/typescript/orm", + "@kalamdb/react": "file:../../link/sdks/typescript/react-old", + "dotenv": "^17.4.2", + "drizzle-orm": "^0.45.2", + "lucide-react": "^1.14.0", + "react": "^19.2.5", + "react-dom": "^19.2.5" + }, + "devDependencies": { + "@types/node": "^25.6.0", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^6.0.1", + "tsx": "^4.21.0", + "typescript": "^6.0.3", + "vite": "^8.0.10" + } + }, + "../../link/sdks/typescript/client": { + "name": "@kalamdb/client", + "version": "0.5.0-beta.1", + "license": "Apache-2.0", + "dependencies": { + "ws": "^8.20.0" + }, + "devDependencies": { + "@types/node": "^25.5.2", + "typescript": "^6.0.2" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "../../link/sdks/typescript/client/node_modules/@types/node": { + "version": "25.5.2", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.18.0" + } + }, + "../../link/sdks/typescript/client/node_modules/typescript": { + "version": "6.0.2", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "../../link/sdks/typescript/client/node_modules/undici-types": { + "version": "7.18.2", + "dev": true, + "license": "MIT" + }, + "../../link/sdks/typescript/client/node_modules/ws": { + "version": "8.20.0", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "../../link/sdks/typescript/consumer": { + "name": "@kalamdb/consumer", + "version": "0.5.0-beta.1", + "license": "Apache-2.0", + "devDependencies": { + "@types/node": "^25.5.0", + "typescript": "^6.0.2" + }, + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "@kalamdb/client": "^0.4.2-rc.1" + } + }, + "../../link/sdks/typescript/consumer/node_modules/@kalamdb/client": { + "resolved": "../../link/sdks/typescript/client", + "link": true + }, + "../../link/sdks/typescript/consumer/node_modules/@types/node": { + "version": "25.6.1", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.19.0" + } + }, + "../../link/sdks/typescript/consumer/node_modules/typescript": { + "version": "6.0.3", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "../../link/sdks/typescript/consumer/node_modules/undici-types": { + "version": "7.19.2", + "dev": true, + "license": "MIT" + }, + "../../link/sdks/typescript/orm": { + "name": "@kalamdb/orm", + "version": "0.5.0-beta.1", + "license": "Apache-2.0", + "bin": { + "kalamdb-orm": "dist/cli.js" + }, + "devDependencies": { + "@kalamdb/client": "file:../client", + "@kalamdb/consumer": "file:../consumer", 
+ "@types/node": "^25.5.2", + "drizzle-orm": "^0.45.2", + "typescript": "^5.8.0" + }, + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "@kalamdb/client": ">=0.4.0", + "drizzle-orm": ">=0.45.2" + } + }, + "../../link/sdks/typescript/orm/node_modules/@kalamdb/consumer": { + "resolved": "../../link/sdks/typescript/consumer", + "link": true + }, + "../../link/sdks/typescript/orm/node_modules/@types/node": { + "version": "25.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.19.0" + } + }, + "../../link/sdks/typescript/orm/node_modules/typescript": { + "version": "5.9.3", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "../../link/sdks/typescript/orm/node_modules/undici-types": { + "version": "7.19.2", + "dev": true, + "license": "MIT" + }, + "../../link/sdks/typescript/react-old": { + "name": "@kalamdb/react", + "version": "0.5.0-beta.1", + "license": "Apache-2.0", + "devDependencies": { + "@kalamdb/client": "file:../client", + "@kalamdb/orm": "file:../orm", + "@testing-library/jest-dom": "^6.9.1", + "@testing-library/react": "^16.3.2", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "drizzle-orm": "^0.45.2", + "jsdom": "^29.1.1", + "react": "^19.2.5", + "react-dom": "^19.2.5", + "typescript": "^6.0.3", + "vitest": "^4.1.5" + }, + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "@kalamdb/client": ">=0.4.3-rc.4", + "@kalamdb/orm": ">=0.4.3-rc.5", + "drizzle-orm": ">=0.45.2", + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0" + }, + "peerDependenciesMeta": { + "@kalamdb/orm": { + "optional": true + }, + "drizzle-orm": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/@emnapi/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz", + "integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.2.1", + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz", + "integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/wasi-threads": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", + "integrity": "sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.7.tgz", + "integrity": "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.7.tgz", + "integrity": 
"sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.7.tgz", + "integrity": "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.7.tgz", + "integrity": "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.7", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.7.tgz", + "integrity": "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.7.tgz", + "integrity": "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.7.tgz", + "integrity": "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.7.tgz", + "integrity": "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.7.tgz", + "integrity": "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.7.tgz", + "integrity": 
"sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.7.tgz", + "integrity": "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.7.tgz", + "integrity": "sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.7.tgz", + "integrity": "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.7.tgz", + "integrity": "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.7.tgz", + "integrity": "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.7.tgz", + "integrity": "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.7.tgz", + "integrity": "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.7.tgz", + "integrity": "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + 
"engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.7.tgz", + "integrity": "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.7.tgz", + "integrity": "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.7.tgz", + "integrity": "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.7.tgz", + "integrity": "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.7.tgz", + "integrity": "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.7.tgz", + "integrity": "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.7.tgz", + "integrity": "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@kalamdb/client": { + "resolved": "../../link/sdks/typescript/client", + "link": true + }, + "node_modules/@kalamdb/consumer": { + "resolved": "../../link/sdks/typescript/consumer", + "link": true + }, + "node_modules/@kalamdb/orm": { + "resolved": "../../link/sdks/typescript/orm", + "link": true + }, + "node_modules/@kalamdb/react": { + "resolved": "../../link/sdks/typescript/react-old", + "link": true + }, + "node_modules/@napi-rs/wasm-runtime": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.4.tgz", + "integrity": 
"sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@tybys/wasm-util": "^0.10.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + }, + "peerDependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1" + } + }, + "node_modules/@oxc-project/types": { + "version": "0.128.0", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Boshen" + } + }, + "node_modules/@rolldown/binding-android-arm64": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-android-arm64/-/binding-android-arm64-1.0.0-rc.18.tgz", + "integrity": "sha512-lIDyUAfD7U3+BWKzdxMbJcsYHuqXqmGz40aeRqvuAm3y5TkJSYTBW2RDrn65DJFPQqVjUAUqq5uz8urzQ8aBdQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-arm64": { + "version": "1.0.0-rc.18", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-x64": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-x64/-/binding-darwin-x64-1.0.0-rc.18.tgz", + "integrity": "sha512-5Ofot8xbs+pxRHJqm9/9N/4sTQOvdrwEsmPE9pdLEEoAbdZtG6F2LMDfO1sp6ZAtXJuJV/21ew2srq3W8NXB5g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-freebsd-x64": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-freebsd-x64/-/binding-freebsd-x64-1.0.0-rc.18.tgz", + "integrity": "sha512-7h8eeOTT1eyqJyx64BFCnWZpNm486hGWt2sqeLLgDxA0xI1oGZ9H7gK1S85uNGmBhkdPwa/6reTxfFFKvIsebw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm-gnueabihf": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.0.0-rc.18.tgz", + "integrity": "sha512-eRcm/HVt9U/JFu5RKAEKwGQYtDCKWLiaH6wOnsSEp6NMBb/3Os8LgHZlNyzMpFVNmiiMFlfb2zEnebfzJrHFmg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-gnu": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.0.0-rc.18.tgz", + "integrity": "sha512-SOrT/cT4ukTmgnrEz/Hg3m7LBnuCLW9psDeMKrimRWY4I8DmnO7Lco8W2vtqPmMkbVu8iJ+g4GFLVLLOVjJ9DQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-musl": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.0.0-rc.18.tgz", + "integrity": "sha512-QWjdxN1HJCpBTAcZ5N5F7wju3gVPzRzSpmGzx7na0c/1qpN9CFil+xt+l9lV/1M6/gqHSNXCiqPfwhVJPeLnug==", + "cpu": [ + "arm64" + 
], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-ppc64-gnu": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.0.0-rc.18.tgz", + "integrity": "sha512-ugCOyj7a4d9h3q9B+wXmf6g3a68UsjGh6dob5DHevHGMwDUbhsYNbSPxJsENcIttJZ9jv7qGM2UesLw5jqIhdg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-s390x-gnu": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.0.0-rc.18.tgz", + "integrity": "sha512-kKWRhbsotpXkGbcd5dllUWg5gEXcDAa8u5YnP9AV5DYNbvJHGzzuwv7dpmhc8NqKMJldl0a+x76IHbspEpEmdA==", + "cpu": [ + "s390x" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-gnu": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.0.0-rc.18.tgz", + "integrity": "sha512-uCo8ElcCIAMyYAZyuIZ81oFkhTSIllNvUCHCAlbhlN4ji3uC28h7IIdlXyIvGO7HsuqnV9p3rD/bpH7XhIyhRw==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-musl": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-musl/-/binding-linux-x64-musl-1.0.0-rc.18.tgz", + "integrity": "sha512-XNOQZtuE6yUIvx4rwGemwh8kpL1xvU41FXy/s9K7T/3JVcqGzo3NfKM2HrbrGgfPYGFW42f07Wk++aOC6B9NWA==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-openharmony-arm64": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-openharmony-arm64/-/binding-openharmony-arm64-1.0.0-rc.18.tgz", + "integrity": "sha512-tSn/kzrfa7tNOXr7sEacDBN4YsIqTyLqh45IO0nHDwtpKIDNDJr+VFojt+4klSpChxB29JLyduSsE0MKEwa65A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-wasm32-wasi": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-wasm32-wasi/-/binding-wasm32-wasi-1.0.0-rc.18.tgz", + "integrity": "sha512-+J9YGmc+czgqlhYmwun3S3O0FIZhsH8ep2456xwjAdIOmuJxM7xz4P4PtrxU+Bz17a/5bqPA8o3HAAoX0teUdg==", + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "1.10.0", + "@emnapi/runtime": "1.10.0", + "@napi-rs/wasm-runtime": "^1.1.4" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-win32-arm64-msvc": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.0.0-rc.18.tgz", + "integrity": 
"sha512-zsu47DgU0FQzSwi6sU9dZoEdUv7pc1AptSEz/Z8HBg54sV0Pbs3N0+CrIbTsgiu6EyoaNN9CHboqbLaz9lhOyQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-win32-x64-msvc": { + "version": "1.0.0-rc.18", + "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.0.0-rc.18.tgz", + "integrity": "sha512-7H+3yqGgmnlDTRRhw/xpYY9J1kf4GC681nVc4GqKhExZTDrVVrV2tsOR9kso0fvgBdcTCcQShx4SLLoHgaLwhg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.7", + "dev": true, + "license": "MIT" + }, + "node_modules/@tybys/wasm-util": { + "version": "0.10.2", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.2.tgz", + "integrity": "sha512-RoBvJ2X0wuKlWFIjrwffGw1IqZHKQqzIchKaadZZfnNpsAYp2mM0h36JtPCjNDAHGgYez/15uMBpfGwchhiMgg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@types/node": { + "version": "25.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.19.0" + } + }, + "node_modules/@types/react": { + "version": "19.2.14", + "dev": true, + "license": "MIT", + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@rolldown/pluginutils": "1.0.0-rc.7" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "@rolldown/plugin-babel": "^0.1.7 || ^0.2.0", + "babel-plugin-react-compiler": "^1.0.0", + "vite": "^8.0.0" + }, + "peerDependenciesMeta": { + "@rolldown/plugin-babel": { + "optional": true + }, + "babel-plugin-react-compiler": { + "optional": true + } + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "dev": true, + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/dotenv": { + "version": "17.4.2", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/drizzle-orm": { + "version": "0.45.2", + "license": "Apache-2.0", + "peerDependencies": { + "@aws-sdk/client-rds-data": ">=3", + "@cloudflare/workers-types": ">=4", + "@electric-sql/pglite": ">=0.2.0", + "@libsql/client": ">=0.10.0", + "@libsql/client-wasm": ">=0.10.0", + "@neondatabase/serverless": ">=0.10.0", + "@op-engineering/op-sqlite": ">=2", + "@opentelemetry/api": "^1.4.1", + "@planetscale/database": ">=1.13", + "@prisma/client": "*", + "@tidbcloud/serverless": "*", + "@types/better-sqlite3": "*", + "@types/pg": "*", + "@types/sql.js": "*", + "@upstash/redis": ">=1.34.7", + "@vercel/postgres": ">=0.8.0", + "@xata.io/client": "*", + "better-sqlite3": ">=7", + "bun-types": "*", + "expo-sqlite": ">=14.0.0", + "gel": ">=2", + "knex": "*", + "kysely": "*", + "mysql2": ">=2", + "pg": ">=8", + "postgres": ">=3", + "sql.js": ">=1", + "sqlite3": ">=5" + }, + "peerDependenciesMeta": { + "@aws-sdk/client-rds-data": { + "optional": true + }, + "@cloudflare/workers-types": 
{ + "optional": true + }, + "@electric-sql/pglite": { + "optional": true + }, + "@libsql/client": { + "optional": true + }, + "@libsql/client-wasm": { + "optional": true + }, + "@neondatabase/serverless": { + "optional": true + }, + "@op-engineering/op-sqlite": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@planetscale/database": { + "optional": true + }, + "@prisma/client": { + "optional": true + }, + "@tidbcloud/serverless": { + "optional": true + }, + "@types/better-sqlite3": { + "optional": true + }, + "@types/pg": { + "optional": true + }, + "@types/sql.js": { + "optional": true + }, + "@upstash/redis": { + "optional": true + }, + "@vercel/postgres": { + "optional": true + }, + "@xata.io/client": { + "optional": true + }, + "better-sqlite3": { + "optional": true + }, + "bun-types": { + "optional": true + }, + "expo-sqlite": { + "optional": true + }, + "gel": { + "optional": true + }, + "knex": { + "optional": true + }, + "kysely": { + "optional": true + }, + "mysql2": { + "optional": true + }, + "pg": { + "optional": true + }, + "postgres": { + "optional": true + }, + "prisma": { + "optional": true + }, + "sql.js": { + "optional": true + }, + "sqlite3": { + "optional": true + } + } + }, + "node_modules/esbuild": { + "version": "0.27.7", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.7", + "@esbuild/android-arm": "0.27.7", + "@esbuild/android-arm64": "0.27.7", + "@esbuild/android-x64": "0.27.7", + "@esbuild/darwin-arm64": "0.27.7", + "@esbuild/darwin-x64": "0.27.7", + "@esbuild/freebsd-arm64": "0.27.7", + "@esbuild/freebsd-x64": "0.27.7", + "@esbuild/linux-arm": "0.27.7", + "@esbuild/linux-arm64": "0.27.7", + "@esbuild/linux-ia32": "0.27.7", + "@esbuild/linux-loong64": "0.27.7", + "@esbuild/linux-mips64el": "0.27.7", + "@esbuild/linux-ppc64": "0.27.7", + "@esbuild/linux-riscv64": "0.27.7", + "@esbuild/linux-s390x": "0.27.7", + "@esbuild/linux-x64": "0.27.7", + "@esbuild/netbsd-arm64": "0.27.7", + "@esbuild/netbsd-x64": "0.27.7", + "@esbuild/openbsd-arm64": "0.27.7", + "@esbuild/openbsd-x64": "0.27.7", + "@esbuild/openharmony-arm64": "0.27.7", + "@esbuild/sunos-x64": "0.27.7", + "@esbuild/win32-arm64": "0.27.7", + "@esbuild/win32-ia32": "0.27.7", + "@esbuild/win32-x64": "0.27.7" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-tsconfig": { + "version": "4.14.0", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, + "node_modules/lightningcss": { + "version": "1.32.0", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.32.0", + "lightningcss-darwin-arm64": "1.32.0", + "lightningcss-darwin-x64": "1.32.0", + "lightningcss-freebsd-x64": "1.32.0", + "lightningcss-linux-arm-gnueabihf": "1.32.0", + "lightningcss-linux-arm64-gnu": "1.32.0", + "lightningcss-linux-arm64-musl": "1.32.0", + "lightningcss-linux-x64-gnu": "1.32.0", + "lightningcss-linux-x64-musl": "1.32.0", + "lightningcss-win32-arm64-msvc": "1.32.0", + "lightningcss-win32-x64-msvc": "1.32.0" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz", + "integrity": "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.32.0", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz", + "integrity": "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz", + "integrity": "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz", + "integrity": "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz", + "integrity": "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz", + "integrity": "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz", + "integrity": "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz", + "integrity": "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz", + "integrity": "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz", + "integrity": "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lucide-react": { + "version": "1.14.0", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.12", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.4", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.14", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/react": { + "version": "19.2.6", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.6", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.6" + } + }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/rolldown": { + "version": "1.0.0-rc.18", + "dev": true, + "license": "MIT", + "dependencies": { + "@oxc-project/types": "=0.128.0", + "@rolldown/pluginutils": "1.0.0-rc.18" + }, + "bin": { + "rolldown": "bin/cli.mjs" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "optionalDependencies": { + "@rolldown/binding-android-arm64": "1.0.0-rc.18", + "@rolldown/binding-darwin-arm64": "1.0.0-rc.18", + "@rolldown/binding-darwin-x64": "1.0.0-rc.18", + "@rolldown/binding-freebsd-x64": "1.0.0-rc.18", + "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-rc.18", + "@rolldown/binding-linux-arm64-gnu": "1.0.0-rc.18", + "@rolldown/binding-linux-arm64-musl": "1.0.0-rc.18", + "@rolldown/binding-linux-ppc64-gnu": "1.0.0-rc.18", + "@rolldown/binding-linux-s390x-gnu": "1.0.0-rc.18", + "@rolldown/binding-linux-x64-gnu": "1.0.0-rc.18", + "@rolldown/binding-linux-x64-musl": "1.0.0-rc.18", + "@rolldown/binding-openharmony-arm64": "1.0.0-rc.18", + "@rolldown/binding-wasm32-wasi": "1.0.0-rc.18", + "@rolldown/binding-win32-arm64-msvc": "1.0.0-rc.18", + "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.18" + } + }, + "node_modules/rolldown/node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.18", + "dev": true, + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.27.0", + "license": "MIT" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.16", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD", + "optional": true + }, + "node_modules/tsx": { + "version": "4.21.0", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "~0.27.0", + "get-tsconfig": "^4.7.5" + }, + "bin": { + "tsx": "dist/cli.mjs" + }, + "engines": { + "node": ">=18.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + } + }, + "node_modules/typescript": { + "version": "6.0.3", + "dev": true, + "license": "Apache-2.0", + "bin": { + 
"tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "7.19.2", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "8.0.11", + "dev": true, + "license": "MIT", + "dependencies": { + "lightningcss": "^1.32.0", + "picomatch": "^4.0.4", + "postcss": "^8.5.14", + "rolldown": "1.0.0-rc.18", + "tinyglobby": "^0.2.16" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "@vitejs/devtools": "^0.1.18", + "esbuild": "^0.27.0 || ^0.28.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "@vitejs/devtools": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + } + } +} diff --git a/examples/react-ai-chat/package.json b/examples/react-ai-chat/package.json new file mode 100644 index 000000000..f72121130 --- /dev/null +++ b/examples/react-ai-chat/package.json @@ -0,0 +1,42 @@ +{ + "name": "kalamdb-react-ai-chat", + "version": "1.0.0", + "private": true, + "type": "module", + "description": "KalamDB React live-query AI chat example with conversations, files, typing, tools, and approvals", + "scripts": { + "presetup": "bash scripts/ensure-sdk.sh", + "setup": "bash setup.sh", + "pregenerate:schema": "bash scripts/ensure-sdk.sh", + "generate:schema": "bash scripts/generate-schema.sh", + "predev": "bash scripts/ensure-sdk.sh", + "dev": "vite --host 127.0.0.1", + "pretest": "bash scripts/ensure-sdk.sh", + "prebuild": "bash scripts/ensure-sdk.sh", + "build": "tsc && vite build", + "preview": "vite preview --host 127.0.0.1", + "preagent": "bash scripts/ensure-sdk.sh", + "agent": "tsx src/agent/index.ts", + "test": "tsc && tsx --test tests/agent.test.ts && node --test tests/chat.spec.mjs" + }, + "dependencies": { + "@kalamdb/client": "file:../../link/sdks/typescript/client", + "@kalamdb/consumer": "file:../../link/sdks/typescript/consumer", + "@kalamdb/orm": "file:../../link/sdks/typescript/orm", + "@kalamdb/react": "file:../../link/sdks/typescript/react-old", + "dotenv": "^17.4.2", + "drizzle-orm": "^0.45.2", + "lucide-react": "^1.14.0", + "react": "^19.2.5", + "react-dom": "^19.2.5" + }, + "devDependencies": { + "@types/node": "^25.6.0", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^6.0.1", + "tsx": "^4.21.0", + "typescript": "^6.0.3", + "vite": "^8.0.10" + } +} \ No newline at end of file diff --git a/examples/react-ai-chat/scripts/ensure-sdk.sh b/examples/react-ai-chat/scripts/ensure-sdk.sh new file mode 100644 index 000000000..ab89d7734 --- /dev/null +++ b/examples/react-ai-chat/scripts/ensure-sdk.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." 
&& pwd)" + +ensure_package() { + local dir="$1" + local build_cmd="$2" + if [ ! -d "$ROOT_DIR/$dir/dist" ]; then + (cd "$ROOT_DIR/$dir" && npm install --no-package-lock && npm run "$build_cmd") + fi +} + +ensure_package "link/sdks/typescript/client" "build:ts" +ensure_package "link/sdks/typescript/orm" "build" +ensure_package "link/sdks/typescript/react-old" "build" \ No newline at end of file diff --git a/examples/react-ai-chat/scripts/generate-schema.sh b/examples/react-ai-chat/scripts/generate-schema.sh new file mode 100644 index 000000000..bec35bf84 --- /dev/null +++ b/examples/react-ai-chat/scripts/generate-schema.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -euo pipefail + +echo "examples/react-ai-chat uses the checked-in src/app/schema.generated.ts for quick local runs." +echo "For a live server, run chat-app.sql, then regenerate with kalamdb-orm if you want schema drift checks." \ No newline at end of file diff --git a/examples/react-ai-chat/setup.sh b/examples/react-ai-chat/setup.sh new file mode 100755 index 000000000..3ebaca9c6 --- /dev/null +++ b/examples/react-ai-chat/setup.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SERVER_URL="${KALAMDB_URL:-http://127.0.0.1:8080}" +USER="${KALAMDB_USER:-admin}" +PASSWORD="${KALAMDB_PASSWORD:-kalamdb123}" +SQL_FILE="$SCRIPT_DIR/chat-app.sql" +ENV_FILE="$SCRIPT_DIR/.env.local" + +fail() { + echo "[setup][error] $*" >&2 + exit 1 +} + +require_cmd() { + command -v "$1" >/dev/null 2>&1 || fail "Missing required command: $1" +} + +run_kalam() { + kalam \ + --url "$SERVER_URL" \ + --user "$USER" \ + --password "$PASSWORD" \ + --no-spinner \ + "$@" +} + +drop_topic_if_present() { + local topic="$1" + if ! run_kalam --command "DROP TOPIC $topic" >/dev/null 2>&1; then + : + fi +} + +echo "Building local SDK packages..." +bash scripts/ensure-sdk.sh + +require_cmd kalam + +echo "Clearing prior example topics if they exist..." +drop_topic_if_present "react_ai_chat.agent_actions" +drop_topic_if_present "react_ai_chat.agent_messages" + +echo "Importing $(basename "$SQL_FILE") with kalam CLI..." +run_kalam --file "$SQL_FILE" + +cat > "$ENV_FILE" <; + +function field(row: TopicRow, key: string): string { + const value = row[key]; + return typeof value === 'string' ? value : value == null ? 
'' : String(value);
+}
+
+function validUser(user: string): string {
+  if (!/^[A-Za-z0-9._-]+$/.test(user)) {
+    throw new Error(`Unsupported KalamDB user: ${user}`);
+  }
+  return user;
+}
+
+async function sleep(ms: number): Promise<void> {
+  await new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+async function runSqlAsUser(client: KalamDBClient, sql: string, params?: unknown[]): Promise<void> {
+  await client.executeAsUser(sql, validUser(KALAMDB_USER), params);
+}
+
+async function insertTypingToken(
+  client: KalamDBClient,
+  conversationId: string,
+  messageId: string,
+  status: string,
+  token: string,
+): Promise<void> {
+  await runSqlAsUser(
+    client,
+    'INSERT INTO react_ai_chat.typing_tokens (conversation_id, message_id, status, token) VALUES ($1, $2, $3, $4)',
+    [conversationId, messageId, status, token],
+  );
+}
+
+async function insertAssistantMessage(
+  client: KalamDBClient,
+  values: {
+    clientId?: string;
+    conversationId: string;
+    replyToMessageId: string;
+    body: string;
+    status: string;
+    approvalId?: string;
+  },
+): Promise<void> {
+  await runSqlAsUser(
+    client,
+    'INSERT INTO react_ai_chat.messages (client_id, conversation_id, reply_to_message_id, role, body, status, approval_id) VALUES ($1, $2, $3, $4, $5, $6, $7)',
+    [
+      values.clientId ?? null,
+      values.conversationId,
+      values.replyToMessageId,
+      'assistant',
+      values.body,
+      values.status,
+      values.approvalId ?? null,
+    ],
+  );
+}
+
+async function insertApproval(
+  client: KalamDBClient,
+  values: {
+    id: string;
+    conversationId: string;
+    messageId: string;
+    title: string;
+    body: string;
+  },
+): Promise<void> {
+  await runSqlAsUser(
+    client,
+    'INSERT INTO react_ai_chat.approvals (id, conversation_id, message_id, title, body, status) VALUES ($1, $2, $3, $4, $5, $6)',
+    [values.id, values.conversationId, values.messageId, values.title, values.body, 'pending'],
+  );
+}
+
+async function updateApproval(client: KalamDBClient, approvalId: string, status: string): Promise<void> {
+  await runSqlAsUser(
+    client,
+    'UPDATE react_ai_chat.approvals SET status = $1, updated_at = NOW() WHERE id = $2',
+    [status, approvalId],
+  );
+}
+
+async function readApproval(client: KalamDBClient, approvalId: string): Promise<Record<string, string> | null> {
+  const rows = await client.queryAll(
+    `EXECUTE AS USER '${validUser(KALAMDB_USER)}' (SELECT * FROM react_ai_chat.approvals WHERE id = $1)`,
+    [approvalId],
+  );
+  const row = rows[0];
+  if (!row) {
+    return null;
+  }
+
+  return Object.fromEntries(Object.entries(row).map(([key, value]) => [key, cellString(value)]));
+}
+
+// Emits typing tokens to simulate streaming, then inserts the final assistant message.
+async function streamThenInsertReply(
+  client: KalamDBClient,
+  conversationId: string,
+  replyToMessageId: string,
+  body: string,
+): Promise<void> {
+  const draftMessageId = `draft-${replyToMessageId}-${Date.now()}`;
+  await insertTypingToken(client, conversationId, draftMessageId, 'thinking', 'Thinking through the next step. ');
+  await sleep(STREAM_DELAY_MS);
+
+  for (const token of splitIntoTokenChunks(body)) {
+    await insertTypingToken(client, conversationId, draftMessageId, 'typing', token);
+    await sleep(STREAM_DELAY_MS);
+  }
+
+  await insertTypingToken(client, conversationId, draftMessageId, 'saving', 'Saving final answer.');
+  await insertAssistantMessage(client, {
+    clientId: draftMessageId,
+    conversationId,
+    replyToMessageId,
+    body,
+    status: 'complete',
+  });
+}
+
+// onRow handler for MESSAGE_TOPIC: plans a tool call, then either requests approval or streams the reply.
+async function handleUserMessage(client: KalamDBClient, row: TopicRow): Promise<void> {
+  if (field(row, 'role') !== 'user' || field(row, 'status') !== 'sent') {
+    return;
+  }
+
+  const messageId = field(row, 'id');
+  const conversationId = field(row, 'conversation_id');
+  const body = field(row, 'body');
+  const plan = createToolPlan(body);
+
+  if (plan.requiresApproval) {
+    const approvalId = `approval-${messageId}`;
+    await insertApproval(client, {
+      id: approvalId,
+      conversationId,
+      messageId,
+      title: plan.approvalTitle,
+      body: plan.approvalBody,
+    });
+    await insertAssistantMessage(client, {
+      conversationId,
+      replyToMessageId: messageId,
+      body: buildApprovalMessage(body),
+      status: 'awaiting_approval',
+      approvalId,
+    });
+    return;
+  }
+
+  await streamThenInsertReply(client, conversationId, messageId, buildAssistantReply(body));
+}
+
+// onRow handler for ACTION_TOPIC: records the approve/decline decision and posts the follow-up assistant message.
+async function handleApprovalAction(client: KalamDBClient, row: TopicRow): Promise<void> {
+  const action = field(row, 'action');
+  const approvalId = field(row, 'approval_id');
+  const conversationId = field(row, 'conversation_id');
+  const approval = await readApproval(client, approvalId);
+  const sourceMessageId = field(approval ?? {}, 'message_id') || approvalId;
+
+  if (action === 'declined') {
+    await updateApproval(client, approvalId, 'declined');
+    await insertAssistantMessage(client, {
+      conversationId,
+      replyToMessageId: sourceMessageId,
+      body: 'Approval was declined, so I stopped the action and left the workspace unchanged.',
+      status: 'complete',
+    });
+    return;
+  }
+
+  await updateApproval(client, approvalId, 'approved');
+  await streamThenInsertReply(client, conversationId, sourceMessageId, buildAssistantReply('approval granted'));
+}
+
+// Starts one consumer per topic and runs both until the optional stop signal aborts.
+export async function startReactAiChatAgent(stopSignal?: AbortSignal): Promise<void> {
+  const client = createConsumerClient({
+    url: KALAMDB_URL,
+    authProvider: async () => Auth.basic(KALAMDB_USER, KALAMDB_PASSWORD),
+  });
+  const sqlClient = client as unknown as KalamDBClient;
+
+  await Promise.all([
+    runAgent({
+      client,
+      name: 'react-ai-chat-message-agent',
+      topic: MESSAGE_TOPIC,
+      groupId: process.env.KALAMDB_GROUP ?? 'react-ai-chat-message-agent',
+      start: 'earliest',
+      batchSize: 10,
+      timeoutSeconds: 30,
+      stopSignal,
+      onRow: async (_ctx, row) => handleUserMessage(sqlClient, row),
+    }),
+    runAgent({
+      client,
+      name: 'react-ai-chat-action-agent',
+      topic: ACTION_TOPIC,
+      groupId: process.env.KALAMDB_ACTION_GROUP ?? 'react-ai-chat-action-agent',
+      start: 'earliest',
+      batchSize: 10,
+      timeoutSeconds: 30,
+      stopSignal,
+      onRow: async (_ctx, row) => handleApprovalAction(sqlClient, row),
+    }),
+  ]);
+}
+
+function cellString(value: unknown): string {
+  if (value && typeof value === 'object' && 'asString' in value && typeof value.asString === 'function') {
+    return value.asString() ?? '';
+  }
+  return value == null ?
'' : String(value); +} + +if (process.argv[1] && fileURLToPath(import.meta.url) === process.argv[1]) { + const controller = new AbortController(); + process.on('SIGINT', () => controller.abort()); + process.on('SIGTERM', () => controller.abort()); + + startReactAiChatAgent(controller.signal).catch((error) => { + console.error('react-ai-chat-agent failed', error); + process.exit(1); + }); +} \ No newline at end of file diff --git a/examples/react-ai-chat/src/agent/logic.ts b/examples/react-ai-chat/src/agent/logic.ts new file mode 100644 index 000000000..495c6ce5f --- /dev/null +++ b/examples/react-ai-chat/src/agent/logic.ts @@ -0,0 +1,67 @@ +export type ToolPlan = { + toolName: string; + requiresApproval: boolean; + approvalTitle: string; + approvalBody: string; +}; + +export function createToolPlan(message: string): ToolPlan { + const lower = message.toLowerCase(); + const requiresApproval = lower.includes('migrate') + || lower.includes('database') + || lower.includes('customer') + || lower.includes('refund') + || lower.includes('approval'); + + if (requiresApproval) { + return { + toolName: 'human_approval', + requiresApproval: true, + approvalTitle: 'Action Required', + approvalBody: lower.includes('project alpha') + ? 'Run database migration for Project Alpha?' + : 'Approve the assistant before it performs this customer-facing or data-changing step.', + }; + } + + return { + toolName: lower.includes('deploy') || lower.includes('release') + ? 'release_lookup' + : lower.includes('analysis') || lower.includes('outlier') ? 'analysis_sandbox' : 'conversation_search', + requiresApproval: false, + approvalTitle: '', + approvalBody: '', + }; +} + +export function buildApprovalMessage(message: string): string { + const subject = message.trim() || 'this request'; + return `I can continue with "${subject}", but this step needs a human decision to approve or decline before the agent proceeds.`; +} + +export function buildAssistantReply(message: string): string { + const subject = message.trim() || 'the latest request'; + const lower = subject.toLowerCase(); + + if (lower.includes('approved') || lower.includes('approval granted')) { + return 'Approval received. 
I continued the database migration plan, verified the dependent checks, and recorded the next safe action for Project Alpha.'; + } + + if (lower.includes('outlier') || lower.includes('analysis') || lower.includes('spreadsheet')) { + return 'I checked the spreadsheet, isolated the European market outliers, and prepared a compact summary for the Project Alpha workspace.'; + } + + if (lower.includes('deploy') || lower.includes('release')) { + return `I reviewed "${subject}" with the release context and found the next deployment check to run.`; + } + + return `I reviewed "${subject}" against the conversation context and prepared the next concise step.`; +} + +export function splitIntoTokenChunks(text: string, chunkSize = 24): string[] { + const chunks: string[] = []; + for (let index = 0; index < text.length; index += chunkSize) { + chunks.push(text.slice(index, index + chunkSize)); + } + return chunks; +} \ No newline at end of file diff --git a/examples/react-ai-chat/src/app/App.tsx b/examples/react-ai-chat/src/app/App.tsx new file mode 100644 index 000000000..fcbbe165f --- /dev/null +++ b/examples/react-ai-chat/src/app/App.tsx @@ -0,0 +1,129 @@ +import { useEffect, useMemo, useState } from 'react'; +import { KalamProvider, LiveQueries, type MultiLiveQueryContext } from '@kalamdb/react'; +import { asc, desc, eq } from 'drizzle-orm'; +import { getExampleClient, isExampleDemoMode } from './client'; +import { Aside } from './components/Aside'; +import { Conversation } from './components/Conversation'; +import { approvalActions, conversations, messages, typingTokens } from './schema.generated'; +import type { ConversationRow } from './schema.generated'; + +const SELECTED_CONVERSATION_KEY = 'kalamdb-react-ai-chat-selected-v3'; +const DEFAULT_CONVERSATION_ID = 'project-alpha'; + +type ChatQueries = { + conversations: { table: typeof conversations }; + messages: { table: typeof messages }; + typingTokens: { table: typeof typingTokens }; +}; + +export type ChatLiveContext = MultiLiveQueryContext; + +export function App() { + const client = useMemo(() => getExampleClient(), []); + const [selectedConversationId, setSelectedConversationId] = useState(loadSelectedConversationId); + + useEffect(() => { + window.localStorage.setItem(SELECTED_CONVERSATION_KEY, selectedConversationId); + }, [selectedConversationId]); + + const queries = useMemo(() => ({ + conversations: { + table: conversations, + orderBy: (table: typeof conversations) => desc(table.updatedAt), + limit: 50, + }, + messages: { + table: messages, + where: (table: typeof messages) => eq(table.conversationId, selectedConversationId), + orderBy: (table: typeof messages) => asc(table.createdAt), + deps: [selectedConversationId], + }, + typingTokens: { + table: typingTokens, + where: (table: typeof typingTokens) => eq(table.conversationId, selectedConversationId), + orderBy: (table: typeof typingTokens) => asc(table.createdAt), + deps: [selectedConversationId], + }, + }), [selectedConversationId]); + + return ( + + + {(live) => ( + + )} + + + ); +} + +function ChatWorkspace({ + live, + selectedConversationId, + onSelectConversation, +}: { + live: ChatLiveContext; + selectedConversationId: string; + onSelectConversation: (conversationId: string) => void; +}) { + const currentConversation = useMemo( + () => resolveConversation(live.conversations.rows, selectedConversationId), + [live.conversations.rows, selectedConversationId], + ); + + useEffect(() => { + if (!currentConversation && live.conversations.rows[0]) { + 
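      // If no conversation resolves for the stored id, select the first live conversation instead.
+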
onSelectConversation(live.conversations.rows[0].id); + } + }, [currentConversation, live.conversations.rows, onSelectConversation]); + + const createConversation = async () => { + const id = createId('conversation'); + await live.insert(conversations).values({ + id, + title: 'New chat', + summary: 'Fresh conversation', + createdAt: new Date(), + updatedAt: new Date(), + }); + onSelectConversation(id); + }; + + return ( +
+
+  );
+}
+
+function resolveConversation(rows: ConversationRow[], selectedConversationId: string): ConversationRow | null {
+  return rows.find((conversation) => conversation.id === selectedConversationId) ?? rows[0] ?? null;
+}
+
+function createId(prefix: string): string {
+  return `${prefix}-${crypto.randomUUID?.() ?? `${Date.now()}-${Math.random().toString(16).slice(2)}`}`;
+}
+
+function loadSelectedConversationId(): string {
+  return window.localStorage.getItem(SELECTED_CONVERSATION_KEY) ?? DEFAULT_CONVERSATION_ID;
+}
\ No newline at end of file
diff --git a/examples/react-ai-chat/src/app/client.ts b/examples/react-ai-chat/src/app/client.ts
new file mode 100644
index 000000000..6fd1469bf
--- /dev/null
+++ b/examples/react-ai-chat/src/app/client.ts
@@ -0,0 +1,31 @@
+import { Auth, createClient, type KalamDBClient } from '@kalamdb/client';
+import { createDemoClient } from './demo-client';
+
+const DEMO_MODE = import.meta.env.VITE_KALAMDB_DEMO_MODE !== 'false';
+
+let singleton: KalamDBClient | null = null;
+
+export function isExampleDemoMode(): boolean {
+  return DEMO_MODE;
+}
+
+export function getExampleClient(): KalamDBClient {
+  if (singleton) {
+    return singleton;
+  }
+
+  if (DEMO_MODE) {
+    singleton = createDemoClient();
+    return singleton;
+  }
+
+  singleton = createClient({
+    url: import.meta.env.VITE_KALAMDB_URL ?? 'http://127.0.0.1:8080',
+    authProvider: async () => Auth.basic(
+      import.meta.env.VITE_KALAMDB_USER ?? 'admin',
+      import.meta.env.VITE_KALAMDB_PASSWORD ?? 'kalamdb123',
+    ),
+    disableCompression: true,
+  });
+  return singleton;
+}
\ No newline at end of file
diff --git a/examples/react-ai-chat/src/app/components/Aside.tsx b/examples/react-ai-chat/src/app/components/Aside.tsx
new file mode 100644
index 000000000..2be4a5d58
--- /dev/null
+++ b/examples/react-ai-chat/src/app/components/Aside.tsx
@@ -0,0 +1,60 @@
+import { HelpCircle, MessageSquarePlus, Settings } from 'lucide-react';
+import type { ConversationRow } from '../schema.generated';
+
+const dateFormatter = new Intl.DateTimeFormat(undefined, { month: 'short', day: 'numeric' });
+
+export function Aside({
+  conversations,
+  selectedConversationId,
+  onCreate,
+  onSelect,
+}: {
+  conversations: ConversationRow[];
+  selectedConversationId: string;
+  onCreate: () => void;
+  onSelect: (conversationId: string) => void;
+}) {
+  return (
+  );
+}
\ No newline at end of file
diff --git a/examples/react-ai-chat/src/app/components/ChatComposer.tsx b/examples/react-ai-chat/src/app/components/ChatComposer.tsx
new file mode 100644
index 000000000..394f844ae
--- /dev/null
+++ b/examples/react-ai-chat/src/app/components/ChatComposer.tsx
@@ -0,0 +1,83 @@
+import { Paperclip, SendHorizontal, X } from 'lucide-react';
+import { useRef, useState } from 'react';
+
+export function ChatComposer({
+  disabled,
+  onSend,
+}: {
+  disabled: boolean;
+  onSend: (body: string, attachment: File | null) => Promise<void>;
+}) {
+  const [body, setBody] = useState('');
+  const [attachment, setAttachment] = useState<File | null>(null);
+  const inputRef = useRef<HTMLInputElement | null>(null);
+  const canSend = body.trim().length > 0 && !disabled;
+
+  const submit = async () => {
+    const trimmed = body.trim();
+    if (!trimmed || disabled) {
+      return;
+    }
+
+    await onSend(trimmed, attachment);
+    setBody('');
+    setAttachment(null);
+    if (inputRef.current) {
+      inputRef.current.value = '';
+    }
+  };
+
+  return (
{ + event.preventDefault(); + void submit(); + }} + > +