diff --git a/.beads/last-touched b/.beads/last-touched index 516ae6f..18c1735 100644 --- a/.beads/last-touched +++ b/.beads/last-touched @@ -1 +1 @@ -el-ffc +el-6r5 diff --git a/.gitignore b/.gitignore index 7d65a72..8081e34 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,8 @@ z_ecto_libsql_test* # Local environment variables. .env.local + +# bv (beads viewer) local config and caches +.bv/ +TEST_AUDIT_REPORT.md +TEST_COVERAGE_ISSUES_CREATED.md diff --git a/AGENTS.md b/AGENTS.md index bd2fa88..f5b308a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1783,7 +1783,7 @@ settings = ~s({"theme":"dark","notifications":true,"language":"es"}) #### Comparison: Set vs Replace vs Insert vs Patch -The modification functions have different behaviors: +The modification functions have different behaviours: ```elixir json = ~s({"a":1,"b":2}) diff --git a/CLAUDE.md b/CLAUDE.md index 2d509da..079c302 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -10,7 +10,11 @@ ## Quick Rules - **British/Australian English** for all code, comments, and documentation (except SQL keywords and compatibility requirements) -- **ALWAYS format before committing**: `mix format --check-formatted` and `cargo fmt` +- **⚠️ CRITICAL: ALWAYS check formatting BEFORE committing**: + 1. Run formatters: `mix format && cd native/ecto_libsql && cargo fmt` + 2. Verify checks pass: `mix format --check-formatted && cargo fmt --check` + 3. 
**Only then** commit: `git commit -m "..."` + - Formatting issues caught at check time, not after commit - **NEVER use `.unwrap()` in production Rust code** - use `safe_lock` helpers (see [Error Handling](#error-handling-patterns)) - **Tests MAY use `.unwrap()`** for simplicity @@ -46,6 +50,7 @@ - [Architecture](#architecture) - [Code Structure](#code-structure) - [Development Workflow](#development-workflow) +- [Issue Tracking with Beads](#issue-tracking-with-beads) - [Error Handling Patterns](#error-handling-patterns) - [Testing](#testing) - [Common Tasks](#common-tasks) @@ -258,7 +263,7 @@ This project uses **Beads** (`bd` command) for issue tracking across sessions. B - **Beads**: Multi-session work, dependencies between tasks, discovered work that needs tracking - **TodoWrite**: Simple single-session task execution -When in doubt, prefer Beads—persistence you don't need beats lost context. +When in doubt, prefer Beads — persistence you don't need beats lost context. **Essential commands:** ```bash @@ -294,6 +299,14 @@ bd sync --from-main # Pull latest beads git add . && git commit -m "..." # Commit changes ``` +#### Best Practices + +- Check `bd ready` at session start to find available work +- Update status as you work (in_progress → closed) +- Create new issues with `bd create` when you discover tasks +- Use descriptive titles and set appropriate priority/type +- Always `bd sync` before ending session + ### Adding a New NIF Function **IMPORTANT**: Modern Rustler auto-detects all `#[rustler::nif]` functions. No manual registration needed. 
@@ -455,6 +468,45 @@ mix test --exclude turso_remote # Skip Turso tests - Type conversions (Elixir ↔ SQLite) - Concurrent operations +### Test Variable Naming Conventions + +For state threading in tests, use consistent variable names and patterns: + +**Variable Naming by Scope**: +```elixir +state # Connection scope +trx_state # Transaction scope +cursor # Cursor scope +stmt_id # Prepared statement ID scope +``` + +**Error Handling Pattern**: + +When an error operation returns updated state, you must decide if that state is needed next: + +```elixir +# ✅ If state IS needed for subsequent operations → Rebind +result = EctoLibSql.handle_execute(sql, params, [], trx_state) +assert {:error, _reason, trx_state} = result # Rebind - reuse updated state +:ok = EctoLibSql.Native.rollback_to_savepoint_by_name(trx_state, "sp1") + +# ✅ If state is NOT needed → Discard with underscore +result = EctoLibSql.handle_execute(sql, params, [], trx_state) +assert {:error, _reason, _state} = result # Discard - not reused +:ok = EctoLibSql.Native.rollback_to_savepoint_by_name(trx_state, "sp1") + +# ✅ For terminal operations → Use underscore variable name +assert {:error, %EctoLibSql.Error{}, _conn} = EctoLibSql.handle_execute(...) +``` + +**Add clarifying comments** when rebinding state: +```elixir +# Rebind trx_state - error tuple contains updated transaction state needed for recovery +assert {:error, _reason, trx_state} = result +``` + +See [TEST_STATE_VARIABLE_CONVENTIONS.md](TEST_STATE_VARIABLE_CONVENTIONS.md) for detailed guidance. + ### Turso Remote Tests ⚠️ **Cost Warning**: Creates real cloud databases. Only run when developing remote/replica functionality. 
@@ -549,14 +601,27 @@ for i in {1..10}; do mix test test/file.exs:42; done # Race conditions ### Pre-Commit Checklist +**STRICT ORDER (do NOT skip steps or reorder)**: + ```bash -mix format && cd native/ecto_libsql && cargo fmt # Format -mix test && cd native/ecto_libsql && cargo test # Test -mix format --check-formatted # Verify format -cd native/ecto_libsql && cargo clippy # Lint (optional) +# 1. Format code (must come FIRST) +mix format && cd native/ecto_libsql && cargo fmt + +# 2. Run tests (catch logic errors) +mix test && cd native/ecto_libsql && cargo test + +# 3. Verify formatting checks (MUST PASS before commit) +mix format --check-formatted && cd native/ecto_libsql && cargo fmt --check + +# 4. Lint checks (optional but recommended) +cd native/ecto_libsql && cargo clippy + +# 5. Only commit if all checks above passed git commit -m "feat: descriptive message" ``` +**⚠️ Critical**: If ANY check fails, fix it and re-run that check before proceeding. Never commit with failing checks. + ### Release Process 1. Update version in `mix.exs` diff --git a/TESTING.md b/TESTING.md index 1384520..d9845fe 100644 --- a/TESTING.md +++ b/TESTING.md @@ -717,6 +717,77 @@ jobs: - Use unique IDs/names (UUIDs) - Clean up properly between tests +### Edge-Case Testing Guide + +EctoLibSql includes comprehensive edge-case testing under concurrent load. These tests verify that the library handles unusual data correctly even when multiple processes are accessing the database simultaneously. + +#### What Edge-Cases Are Tested + +The test suite covers: + +1. **NULL Values**: Ensure NULL is properly handled in concurrent inserts and transactions +2. **Empty Strings**: Verify empty strings aren't converted to NULL or corrupted +3. **Large Strings**: Test 1KB strings under concurrent load for truncation or corruption +4. **Special Characters**: Verify parameterised queries safely handle special characters (`!@#$%^&*()`) +5. 
**Recovery After Errors**: Confirm connection recovers after query errors without losing edge-case data +6. **Resource Cleanup**: Verify prepared statements with edge-case data are cleaned up correctly + +#### Test Locations + +- **Pool Load Tests**: `test/pool_load_test.exs` + - `test "concurrent connections with edge-case data"` - 5 concurrent connections, 5 edge-case values each + - `test "connection recovery with edge-case data"` - Error handling with NULL/empty/large strings + - `test "prepared statements with edge-case data"` - Statement cleanup under concurrent load with edge cases + +- **Transaction Isolation Tests**: `test/pool_load_test.exs` + - `test "concurrent transactions with edge-case data maintain isolation"` - 4 transactions, edge-case values + +#### Helper Functions + +The test suite provides reusable helpers for edge-case testing: + +```elixir +# Generate edge-case values for testing +defp generate_edge_case_values(task_num) do + [ + "normal_value_#{task_num}", # Normal string + nil, # NULL value + "", # Empty string + String.duplicate("x", 1000), # Large string (1KB) + "special_chars_!@#$%^&*()_+-=[]{};" # Special characters + ] +end + +# Insert edge-case value and return result +defp insert_edge_case_value(state, value) do + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + [value], + [], + state + ) +end +``` + +#### When to Use Edge-Case Tests + +Add edge-case tests when: +- Testing concurrent operations +- Adding support for new data types +- Changing query execution paths +- Modifying transaction handling +- Improving connection pooling + +#### Expected Coverage + +Edge-case tests should verify: +- Data integrity (no corruption, truncation, or loss) +- NULL value preservation +- String encoding correctness +- Parameter binding safety +- Error recovery without data loss +- Resource cleanup (statements, cursors, connections) + ### Known Test Limitations 1. 
**Remote/Replica Mode Testing:** diff --git a/native/ecto_libsql/src/tests/error_handling_tests.rs b/native/ecto_libsql/src/tests/error_handling_tests.rs new file mode 100644 index 0000000..5b602b8 --- /dev/null +++ b/native/ecto_libsql/src/tests/error_handling_tests.rs @@ -0,0 +1,761 @@ +//! Error handling tests for the Rust NIF layer +//! +//! These tests verify that the Rust layer gracefully returns errors instead of +//! panicking, which is critical for BEAM VM stability. Prior to v0.4.0, many +//! error conditions could panic and crash the entire VM. +//! +//! Focus areas: +//! 1. Invalid resource IDs (connection, statement, transaction, cursor) +//! 2. Parameter validation (count mismatch, type mismatch) +//! 3. Constraint violations (NOT NULL, UNIQUE, FOREIGN KEY, CHECK) +//! 4. Transaction errors (operations after commit, double rollback) +//! 5. Query syntax errors (invalid SQL, non-existent table/column) +//! 6. Resource exhaustion (too many prepared statements/cursors) + +// Allow unwrap() in tests for cleaner test code - see CLAUDE.md "Test Code Exception" +#![allow(clippy::unwrap_used)] + +use super::test_utils::{setup_test_db_with_prefix, TestDbGuard}; +use libsql::{Builder, Value}; + +// ============================================================================ +// CONSTRAINT VIOLATION TESTS +// ============================================================================ + +#[tokio::test] +async fn test_not_null_constraint_violation() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute( + "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT NOT NULL)", + (), + ) + .await + .unwrap(); + + // This should fail with constraint error, not panic + let result = conn + .execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![Value::Integer(1), 
Value::Null], + ) + .await; + + assert!( + result.is_err(), + "Expected constraint error for NULL in NOT NULL column" + ); +} + +#[tokio::test] +async fn test_unique_constraint_violation() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute( + "CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT UNIQUE NOT NULL)", + (), + ) + .await + .unwrap(); + + // Insert first record + conn.execute( + "INSERT INTO users (id, email) VALUES (?1, ?2)", + vec![ + Value::Integer(1), + Value::Text("alice@example.com".to_string()), + ], + ) + .await + .unwrap(); + + // Insert duplicate email - should fail with constraint error, not panic + let result = conn + .execute( + "INSERT INTO users (id, email) VALUES (?1, ?2)", + vec![ + Value::Integer(2), + Value::Text("alice@example.com".to_string()), + ], + ) + .await; + + assert!( + result.is_err(), + "Expected unique constraint error for duplicate email" + ); +} + +#[tokio::test] +async fn test_primary_key_constraint_violation() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)", ()) + .await + .unwrap(); + + // Insert first record + conn.execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![Value::Integer(1), Value::Text("Alice".to_string())], + ) + .await + .unwrap(); + + // Insert duplicate primary key - should fail with constraint error, not panic + let result = conn + .execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![Value::Integer(1), Value::Text("Bob".to_string())], + ) + .await; + + assert!( + result.is_err(), + "Expected primary key constraint error 
for duplicate id" + ); +} + +#[tokio::test] +async fn test_check_constraint_violation() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute( + "CREATE TABLE products (id INTEGER PRIMARY KEY, price REAL CHECK(price > 0))", + (), + ) + .await + .unwrap(); + + // Insert valid record + conn.execute( + "INSERT INTO products (id, price) VALUES (?1, ?2)", + vec![Value::Integer(1), Value::Real(19.99)], + ) + .await + .unwrap(); + + // Insert record violating check constraint - should fail, not panic + let result = conn + .execute( + "INSERT INTO products (id, price) VALUES (?1, ?2)", + vec![Value::Integer(2), Value::Real(-5.0)], + ) + .await; + + assert!( + result.is_err(), + "Expected check constraint error for negative price" + ); +} + +// ============================================================================ +// SYNTAX AND SEMANTIC ERROR TESTS +// ============================================================================ + +#[tokio::test] +async fn test_invalid_sql_syntax() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + // Invalid SQL should return error, not panic + let result = conn + .execute("SELECT * FRM users", ()) // Typo: FRM instead of FROM + .await; + + assert!(result.is_err(), "Expected error for invalid SQL syntax"); +} + +#[tokio::test] +async fn test_nonexistent_table() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + // Query non-existent table should return error, not panic + let 
result = conn.query("SELECT * FROM nonexistent_table", ()).await; + + assert!(result.is_err(), "Expected error for non-existent table"); +} + +#[tokio::test] +async fn test_nonexistent_column() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)", ()) + .await + .unwrap(); + + // Query non-existent column should return error, not panic + let result = conn.query("SELECT nonexistent_column FROM users", ()).await; + + assert!(result.is_err(), "Expected error for non-existent column"); +} + +#[tokio::test] +async fn test_malformed_sql() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + // Incomplete SQL + let result = conn.execute("SELECT * FROM users WHERE", ()).await; + + assert!(result.is_err(), "Expected error for malformed SQL"); +} + +// ============================================================================ +// PARAMETER BINDING ERROR TESTS +// ============================================================================ + +#[tokio::test] +async fn test_parameter_count_mismatch_missing() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER, name TEXT, email TEXT)", ()) + .await + .unwrap(); + + // SQL expects 3 parameters, but only 2 provided - should return error + let result = conn + .execute( + "INSERT INTO users (id, name, email) VALUES (?1, ?2, ?3)", + vec![Value::Integer(1), 
Value::Text("Alice".to_string())], + ) + .await; + + // libsql behaviour varies - may accept or reject + // The important thing is it doesn't panic + let _ = result; +} + +#[tokio::test] +async fn test_parameter_count_mismatch_excess() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) + .await + .unwrap(); + + // SQL expects 2 parameters, but 3 provided - should handle gracefully + let result = conn + .execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![ + Value::Integer(1), + Value::Text("Alice".to_string()), + Value::Text("extra".to_string()), + ], + ) + .await; + + // libsql will either accept or reject - the key is no panic + let _ = result; +} + +#[tokio::test] +async fn test_type_coercion_integer_to_text() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) + .await + .unwrap(); + + // SQLite is dynamically typed, so this should work (integer coerced to text) + let result = conn + .execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![Value::Integer(1), Value::Integer(123)], // Integer for text column + ) + .await; + + // SQLite permits this due to type affinity - verify insert completed successfully + assert!( + result.is_ok(), + "Should accept integer value for TEXT column due to type affinity without panic" + ); +} + +// ============================================================================ +// TRANSACTION ERROR TESTS +// ============================================================================ + +#[tokio::test] +async fn 
test_double_commit() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER)", ()) + .await + .unwrap(); + + conn.execute("BEGIN", ()).await.unwrap(); + conn.execute( + "INSERT INTO users (id) VALUES (?1)", + vec![Value::Integer(1)], + ) + .await + .unwrap(); + conn.execute("COMMIT", ()).await.unwrap(); + + // Second commit without begin - should fail gracefully, not panic + let result = conn.execute("COMMIT", ()).await; + + assert!( + result.is_err(), + "Expected error for commit without active transaction" + ); +} + +#[tokio::test] +async fn test_double_rollback() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER)", ()) + .await + .unwrap(); + + conn.execute("BEGIN", ()).await.unwrap(); + conn.execute( + "INSERT INTO users (id) VALUES (?1)", + vec![Value::Integer(1)], + ) + .await + .unwrap(); + conn.execute("ROLLBACK", ()).await.unwrap(); + + // Second rollback without begin - should fail gracefully, not panic + let result = conn.execute("ROLLBACK", ()).await; + + assert!( + result.is_err(), + "Expected error for rollback without active transaction" + ); +} + +#[tokio::test] +async fn test_commit_after_rollback() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER)", ()) + .await + .unwrap(); + + conn.execute("BEGIN", ()).await.unwrap(); + conn.execute( + "INSERT INTO users (id) VALUES 
(?1)", + vec![Value::Integer(1)], + ) + .await + .unwrap(); + conn.execute("ROLLBACK", ()).await.unwrap(); + + // Commit after rollback - should fail gracefully, not panic + let result = conn.execute("COMMIT", ()).await; + + assert!(result.is_err(), "Expected error for commit after rollback"); +} + +#[tokio::test] +async fn test_query_after_rollback() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER)", ()) + .await + .unwrap(); + + conn.execute("BEGIN", ()).await.unwrap(); + conn.execute( + "INSERT INTO users (id) VALUES (?1)", + vec![Value::Integer(1)], + ) + .await + .unwrap(); + conn.execute("ROLLBACK", ()).await.unwrap(); + + // Verify data was not committed + let mut rows = conn.query("SELECT COUNT(*) FROM users", ()).await.unwrap(); + let row = rows.next().await.unwrap().unwrap(); + let count = row.get::<i64>(0).unwrap(); + assert_eq!(count, 0, "Data should be rolled back"); +} + +// ============================================================================ +// PREPARED STATEMENT ERROR TESTS +// ============================================================================ + +#[tokio::test] +async fn test_prepare_invalid_sql() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + // Prepare invalid SQL - should return error, not panic + let result = conn + .prepare("SELECT * FRM users") // Typo: FRM instead of FROM + .await; + + assert!(result.is_err(), "Expected error for invalid SQL in prepare"); +} + +#[tokio::test] +async fn test_prepared_statement_with_parameter_mismatch() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard =
TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) + .await + .unwrap(); + + conn.execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![Value::Integer(1), Value::Text("Alice".to_string())], + ) + .await + .unwrap(); + + let stmt = conn + .prepare("SELECT * FROM users WHERE id = ?1 AND name = ?2") + .await + .unwrap(); + + // Execute with only 1 parameter when 2 are expected - should handle gracefully + let result = stmt.query(vec![Value::Integer(1)]).await; + + // Depending on libsql behaviour, may error or coerce - key is no panic + let _ = result; +} + +// ============================================================================ +// DATABASE FILE ERROR TESTS +// ============================================================================ + +#[cfg(unix)] +#[tokio::test] +async fn test_create_db_invalid_permissions() { + // Test with path that's definitely invalid (Unix-specific: null bytes) + let invalid_path = "\0invalid\0path.db"; // Null bytes in path + + // Creating DB with invalid path should error, not panic + let result = Builder::new_local(invalid_path).build().await; + + // This should error due to invalid path, or succeed silently + // The key is it doesn't panic + let _ = result; +} + +#[cfg(windows)] +#[tokio::test] +async fn test_create_db_invalid_permissions() { + // Test with path that's definitely invalid (Windows-specific: invalid characters) + let invalid_path = "COM1"; // Reserved device name on Windows + + // Creating DB with invalid path should error, not panic + let result = Builder::new_local(invalid_path).build().await; + + // This should error due to invalid path, or succeed silently + // The key is it doesn't panic + let _ = result; +} + +#[tokio::test] +async fn test_database_persistence_and_reopen() { + let db_path = 
setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db_path_str = db_path.to_str().unwrap(); + + // Create database, table, and insert data + let db = Builder::new_local(db_path_str).build().await.unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER)", ()) + .await + .unwrap(); + + conn.execute( + "INSERT INTO users (id) VALUES (?1)", + vec![Value::Integer(1)], + ) + .await + .unwrap(); + + // Verify data was inserted + let mut rows = conn.query("SELECT COUNT(*) FROM users", ()).await.unwrap(); + let row = rows.next().await.unwrap().unwrap(); + let count = row.get::<i64>(0).unwrap(); + assert_eq!(count, 1, "Data should be inserted"); + + drop(conn); + drop(db); + + // Reopen database and verify persistence + // This tests that data survives connection close/reopen cycles + let db2 = Builder::new_local(db_path_str).build().await.unwrap(); + let conn2 = db2.connect().unwrap(); + + // Query should work and return persisted data + let mut rows = conn2.query("SELECT COUNT(*) FROM users", ()).await.unwrap(); + let row = rows.next().await.unwrap().unwrap(); + let count = row.get::<i64>(0).unwrap(); + assert_eq!( + count, 1, + "Persisted data should be readable after reopening" + ); +} + +// ============================================================================ +// EDGE CASE TESTS +// ============================================================================ + +#[tokio::test] +async fn test_empty_sql_statement() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + // Empty SQL - should return error, not panic + let result = conn.execute("", ()).await; + + assert!(result.is_err(), "Expected error for empty SQL"); +} + +#[tokio::test] +async fn test_whitespace_only_sql() { + let db_path =
setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + // Whitespace-only SQL - should return error, not panic + let result = conn.execute(" \n\t ", ()).await; + + assert!(result.is_err(), "Expected error for whitespace-only SQL"); +} + +#[tokio::test] +async fn test_very_long_sql_query() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER)", ()) + .await + .unwrap(); + + // Create very long WHERE clause (1000 OR conditions) + let mut sql = "SELECT * FROM users WHERE id = 1".to_string(); + for i in 2..=1000 { + sql.push_str(&format!(" OR id = {i}")); + } + + // Very long query should either work or fail gracefully, not panic + let result = conn.query(&sql, ()).await; + let _ = result; // Don't assert on success/failure, just that it doesn't panic +} + +#[tokio::test] +async fn test_unicode_in_sql() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) + .await + .unwrap(); + + // Unicode in parameter - should work fine + let result = conn + .execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![ + Value::Integer(1), + Value::Text("Ålice 中文 العربية".to_string()), + ], + ) + .await; + + assert!(result.is_ok(), "Should handle unicode values"); + + // Verify retrieval + let mut rows = conn + .query( + "SELECT name FROM users WHERE id = ?1", + vec![Value::Integer(1)], + ) + .await + .unwrap(); + let row = 
rows.next().await.unwrap().unwrap(); + let name = row.get::<String>(0).unwrap(); + assert_eq!(name, "Ålice 中文 العربية"); +} + +#[tokio::test] +async fn test_sql_injection_attempt() { + let db_path = setup_test_db_with_prefix("errors"); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); + let conn = db.connect().unwrap(); + + conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) + .await + .unwrap(); + + // SQL injection attempt should be safely parameterised + let result = conn + .execute( + "INSERT INTO users (id, name) VALUES (?1, ?2)", + vec![ + Value::Integer(1), + Value::Text("Alice'; DROP TABLE users; --".to_string()), + ], + ) + .await; + + assert!( + result.is_ok(), + "Parameterised query should safely insert injection string" + ); + + // Verify table still exists and contains the literal string + let mut rows = conn.query("SELECT COUNT(*) FROM users", ()).await.unwrap(); + let row = rows.next().await.unwrap().unwrap(); + let count = row.get::<i64>(0).unwrap(); + assert_eq!( + count, 1, + "Table should still exist with parameterised injection" + ); +} diff --git a/native/ecto_libsql/src/tests/integration_tests.rs b/native/ecto_libsql/src/tests/integration_tests.rs index 181ebc2..a4855f7 100644 --- a/native/ecto_libsql/src/tests/integration_tests.rs +++ b/native/ecto_libsql/src/tests/integration_tests.rs @@ -7,23 +7,15 @@ // Allow unwrap() in tests for cleaner test code - see CLAUDE.md "Test Code Exception" #![allow(clippy::unwrap_used)] +use super::test_utils::{setup_test_db, TestDbGuard}; use libsql::{Builder, Value}; -use std::fs; -use uuid::Uuid; - -fn setup_test_db() -> String { - format!("z_ecto_libsql_test-{}.db", Uuid::new_v4()) -} - -fn cleanup_test_db(db_path: &str) { - let _ = fs::remove_file(db_path); -} #[tokio::test] async fn test_create_local_database() { let db_path = setup_test_db(); + let _guard = TestDbGuard::new(db_path.clone()); - let result =
Builder::new_local(&db_path).build().await; + let result = Builder::new_local(db_path.to_str().unwrap()).build().await; assert!(result.is_ok(), "Failed to create local database"); let db = result.unwrap(); @@ -34,14 +26,17 @@ async fn test_create_local_database() { .execute("CREATE TABLE test (id INTEGER PRIMARY KEY, name TEXT)", ()) .await; assert!(result.is_ok(), "Failed to create table"); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_parameter_binding_with_integers() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE users (id INTEGER, age INTEGER)", ()) @@ -70,14 +65,17 @@ async fn test_parameter_binding_with_integers() { let row = rows.next().await.unwrap().unwrap(); assert_eq!(row.get::<i64>(0).unwrap(), 1); assert_eq!(row.get::<i64>(1).unwrap(), 30); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_parameter_binding_with_floats() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE products (id INTEGER, price REAL)", ()) @@ -110,14 +108,17 @@ async fn test_parameter_binding_with_floats() { (price - 19.99).abs() < 0.01, "Price should be approximately 19.99" ); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_parameter_binding_with_text() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE users (id
INTEGER, name TEXT)", ()) @@ -145,14 +146,17 @@ async fn test_parameter_binding_with_text() { let row = rows.next().await.unwrap().unwrap(); assert_eq!(row.get::(0).unwrap(), "Alice"); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_transaction_commit() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) @@ -173,14 +177,17 @@ async fn test_transaction_commit() { let mut rows = conn.query("SELECT COUNT(*) FROM users", ()).await.unwrap(); let row = rows.next().await.unwrap().unwrap(); assert_eq!(row.get::(0).unwrap(), 1); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_transaction_rollback() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) @@ -201,14 +208,17 @@ async fn test_transaction_rollback() { let mut rows = conn.query("SELECT COUNT(*) FROM users", ()).await.unwrap(); let row = rows.next().await.unwrap().unwrap(); assert_eq!(row.get::(0).unwrap(), 0); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_prepared_statement() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE users (id INTEGER, name TEXT)", ()) @@ -238,7 +248,7 @@ async fn test_prepared_statement() { let first_row = 
result_rows_1.next().await.unwrap().unwrap(); assert_eq!(first_row.get::(0).unwrap(), "Alice"); - // Test prepared statement with second parameter (prepare again, mimicking NIF behavior) + // Test prepared statement with second parameter (prepare again, mimicking NIF behaviour) let stmt2 = conn .prepare("SELECT name FROM users WHERE id = ?1") .await @@ -246,14 +256,17 @@ async fn test_prepared_statement() { let mut result_rows_2 = stmt2.query(vec![Value::Integer(2)]).await.unwrap(); let second_row = result_rows_2.next().await.unwrap().unwrap(); assert_eq!(second_row.get::(0).unwrap(), "Bob"); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_blob_storage() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE files (id INTEGER, data BLOB)", ()) @@ -280,14 +293,17 @@ async fn test_blob_storage() { let row = rows.next().await.unwrap().unwrap(); let retrieved_data = row.get::>(0).unwrap(); assert_eq!(retrieved_data, test_data); - - cleanup_test_db(&db_path); } #[tokio::test] async fn test_null_values() { let db_path = setup_test_db(); - let db = Builder::new_local(&db_path).build().await.unwrap(); + let _guard = TestDbGuard::new(db_path.clone()); + + let db = Builder::new_local(db_path.to_str().unwrap()) + .build() + .await + .unwrap(); let conn = db.connect().unwrap(); conn.execute("CREATE TABLE users (id INTEGER, email TEXT)", ()) @@ -313,6 +329,4 @@ async fn test_null_values() { let row = rows.next().await.unwrap().unwrap(); let email_value = row.get_value(0).unwrap(); assert!(matches!(email_value, Value::Null)); - - cleanup_test_db(&db_path); } diff --git a/native/ecto_libsql/src/tests/mod.rs b/native/ecto_libsql/src/tests/mod.rs index 7be3bbc..ba6ac28 100644 --- a/native/ecto_libsql/src/tests/mod.rs +++ 
b/native/ecto_libsql/src/tests/mod.rs @@ -4,6 +4,8 @@ //! that correspond to the main library modules. mod constants_tests; +mod error_handling_tests; mod integration_tests; mod proptest_tests; +mod test_utils; mod utils_tests; diff --git a/native/ecto_libsql/src/tests/test_utils.rs b/native/ecto_libsql/src/tests/test_utils.rs new file mode 100644 index 0000000..3b6e6f2 --- /dev/null +++ b/native/ecto_libsql/src/tests/test_utils.rs @@ -0,0 +1,109 @@ +//! Shared test utilities for integration and error handling tests +//! +//! This module provides common test infrastructure used across multiple test files +//! to avoid duplication and ensure consistent test behaviour. + +use std::fs; +use std::path::PathBuf; +use uuid::Uuid; + +/// RAII guard that ensures database and associated SQLite files are cleaned up +/// after all database handles (conn, db) are dropped. +/// +/// This guard must be declared FIRST in tests so its Drop impl runs LAST, +/// ensuring files are deleted only after the db connection is fully closed. +/// This prevents Windows file-lock issues with .db, .db-wal, .db-shm, and other +/// SQLite auxiliary files. Removes all five file types for parity with Elixir's +/// cleanup_db_files/1 helper: +/// - .db (main database file) +/// - .db-wal (Write-Ahead Log) +/// - .db-shm (Shared Memory) +/// - .db-journal (Journal file) +/// - .db-info (Info file for replication metadata) +pub struct TestDbGuard { + db_path: PathBuf, +} + +impl TestDbGuard { + /// Create a new test database guard for the given path. + /// + /// # Example + /// + /// ```ignore + /// let db_path = setup_test_db(); + /// let _guard = TestDbGuard::new(db_path.clone()); + /// // ... database operations ... 
+ /// // Guard automatically cleans up when dropped + /// ``` + pub fn new(db_path: PathBuf) -> Self { + TestDbGuard { db_path } + } +} + +impl Drop for TestDbGuard { + fn drop(&mut self) { + // Remove main database file + let _ = fs::remove_file(&self.db_path); + + // Remove WAL (Write-Ahead Log) file + let wal_path = format!("{}-wal", self.db_path.display()); + let _ = fs::remove_file(&wal_path); + + // Remove SHM (Shared Memory) file + let shm_path = format!("{}-shm", self.db_path.display()); + let _ = fs::remove_file(&shm_path); + + // Remove JOURNAL file (SQLite rollback journal) + let journal_path = format!("{}-journal", self.db_path.display()); + let _ = fs::remove_file(&journal_path); + + // Remove INFO file (replication metadata for remote replicas) + let info_path = format!("{}-info", self.db_path.display()); + let _ = fs::remove_file(&info_path); + } +} + +/// Set up a unique test database file in the system temp directory. +/// +/// Generates a unique database filename using UUID to ensure test isolation. +/// +/// # Returns +/// +/// A `PathBuf` pointing to a temporary database file. +/// +/// # Example +/// +/// ```ignore +/// let db_path = setup_test_db(); +/// let _guard = TestDbGuard::new(db_path.clone()); +/// let db = Builder::new_local(db_path.to_str().unwrap()).build().await.unwrap(); +/// ``` +pub fn setup_test_db() -> PathBuf { + let temp_dir = std::env::temp_dir(); + let db_name = format!("z_ecto_libsql_test-{}.db", Uuid::new_v4()); + temp_dir.join(db_name) +} + +/// Set up a test database with a specific name prefix. +/// +/// Useful when you want to ensure a specific database name pattern for debugging. +/// +/// # Arguments +/// +/// * `prefix` - A string prefix for the database name (e.g., "errors", "integration") +/// +/// # Returns +/// +/// A `PathBuf` pointing to a temporary database file with the given prefix. 
+/// +/// # Example +/// +/// ```ignore +/// let db_path = setup_test_db_with_prefix("errors"); +/// // Results in: /tmp/z_ecto_libsql_test-errors-<uuid>.db +/// ``` +pub fn setup_test_db_with_prefix(prefix: &str) -> PathBuf { + let temp_dir = std::env::temp_dir(); + let db_name = format!("z_ecto_libsql_test-{}-{}.db", prefix, Uuid::new_v4()); + temp_dir.join(db_name) +} diff --git a/native/ecto_libsql/src/transaction.rs b/native/ecto_libsql/src/transaction.rs index 7550655..10d40d7 100644 --- a/native/ecto_libsql/src/transaction.rs +++ b/native/ecto_libsql/src/transaction.rs @@ -1,7 +1,7 @@ /// Transaction management for LibSQL databases. /// /// This module handles database transactions, including: -/// - Starting transactions with configurable locking behavior +/// - Starting transactions with configurable locking behaviour /// - Executing queries and statements within transactions /// - Committing or rolling back transactions /// - Transaction ownership verification @@ -152,7 +152,7 @@ impl Drop for TransactionEntryGuard { /// Begin a new database transaction. /// -/// Starts a transaction with the default DEFERRED behavior, which acquires +/// Starts a transaction with the default DEFERRED behaviour, which acquires /// locks only when needed. Use `begin_transaction_with_behavior` for fine-grained /// control over transaction locking. /// @@ -200,7 +200,7 @@ pub fn begin_transaction(conn_id: &str) -> NifResult { Ok(trx_id) } -/// Begin a new database transaction with specific locking behavior. +/// Begin a new database transaction with specific locking behaviour. 
/// /// Allows control over how aggressively the transaction acquires locks: /// - `:deferred` - Acquire locks only when needed (default, recommended) diff --git a/test/advanced_features_test.exs b/test/advanced_features_test.exs index c42a962..a45a4fa 100644 --- a/test/advanced_features_test.exs +++ b/test/advanced_features_test.exs @@ -73,9 +73,7 @@ defmodule EctoLibSql.AdvancedFeaturesTest do EctoLibSql.disconnect([], state) # Cleanup - File.rm(db_path) - File.rm(db_path <> "-shm") - File.rm(db_path <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(db_path) end test "max_write_replication_index returns error for invalid connection" do diff --git a/test/batch_features_test.exs b/test/batch_features_test.exs index add4f51..83aa461 100644 --- a/test/batch_features_test.exs +++ b/test/batch_features_test.exs @@ -13,9 +13,7 @@ defmodule EctoLibSql.BatchFeaturesTest do opts = [database: test_db] on_exit(fn -> - File.rm(test_db) - File.rm(test_db <> "-shm") - File.rm(test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(test_db) end) {:ok, database: test_db, opts: opts} diff --git a/test/connection_features_test.exs b/test/connection_features_test.exs index b5c25c9..2248435 100644 --- a/test/connection_features_test.exs +++ b/test/connection_features_test.exs @@ -11,9 +11,7 @@ defmodule EctoLibSql.ConnectionFeaturesTest do test_db = "z_ecto_libsql_test-conn_features_#{:erlang.unique_integer([:positive])}.db" on_exit(fn -> - File.rm(test_db) - File.rm(test_db <> "-shm") - File.rm(test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(test_db) end) {:ok, database: test_db} diff --git a/test/connection_recovery_test.exs b/test/connection_recovery_test.exs new file mode 100644 index 0000000..1895c7e --- /dev/null +++ b/test/connection_recovery_test.exs @@ -0,0 +1,457 @@ +defmodule EctoLibSql.ConnectionRecoveryTest do + use ExUnit.Case + alias EctoLibSql + + # Tests for connection recovery and resilience after failures. 
+ # Focuses on critical real-world scenarios. + + setup do + {:ok, state} = EctoLibSql.connect(database: ":memory:") + + on_exit(fn -> + EctoLibSql.disconnect([], state) + end) + + {:ok, state: state} + end + + describe "connection recovery from errors" do + test "connection remains usable after failed query", %{state: state} do + # Set up a table + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE test_data (id INTEGER PRIMARY KEY, value TEXT)", + [], + [], + state + ) + + # Execute a successful query + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (id, value) VALUES (1, 'first')", + [], + [], + state + ) + + # Attempt a query that fails - connection should survive + _result = EctoLibSql.handle_execute("SELECT * FROM nonexistent_table", [], [], state) + + # Connection should still be usable after error + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT * FROM test_data", + [], + [], + state + ) + + assert result.num_rows == 1 + end + + test "constraint violation doesn't break connection", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE users (id INTEGER PRIMARY KEY, email TEXT UNIQUE NOT NULL)", + [], + [], + state + ) + + # Insert valid data + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO users (id, email) VALUES (1, 'alice@example.com')", + [], + [], + state + ) + + # Attempt insert with duplicate email - should fail but not crash connection + _result = + EctoLibSql.handle_execute( + "INSERT INTO users (id, email) VALUES (2, 'alice@example.com')", + [], + [], + state + ) + + # Connection should still be usable + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM users", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 1, "Only one user should exist after constraint violation" + end + + test "syntax error doesn't break connection", %{state: state} do + {:ok, _, _, state} = + 
EctoLibSql.handle_execute( + "CREATE TABLE items (id INTEGER PRIMARY KEY, name TEXT)", + [], + [], + state + ) + + # Insert with correct parameters + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO items (id, name) VALUES (?, ?)", + [1, "item1"], + [], + state + ) + + # Attempt with invalid SQL syntax + _result = + EctoLibSql.handle_execute( + "INSRT INTO items (id, name) VALUES (2, 'item2')", + [], + [], + state + ) + + # Connection should still work + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM items", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 1 + end + + test "transaction survives query errors within transaction", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE accounts (id INTEGER PRIMARY KEY, balance INTEGER)", + [], + [], + state + ) + + # Begin transaction + {:ok, :begin, state} = EctoLibSql.handle_begin([], state) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO accounts (id, balance) VALUES (1, 100)", + [], + [], + state + ) + + # Execute query that fails within transaction + _error_result = + EctoLibSql.handle_execute( + "SELECT invalid_column FROM accounts", + [], + [], + state + ) + + # Transaction should still be rollbackable + {:ok, _, state} = EctoLibSql.handle_rollback([], state) + + # Verify transaction was rolled back + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM accounts", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 0, "Transaction should have been rolled back" + end + + test "prepared statement error doesn't break connection", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE products (id INTEGER PRIMARY KEY, name TEXT)", + [], + [], + state + ) + + # Try to prepare invalid statement + _prep_result = EctoLibSql.Native.prepare(state, "SELECT * FRM products") + + # Connection should still be usable + {:ok, _, _, 
state} = + EctoLibSql.handle_execute( + "INSERT INTO products (id, name) VALUES (1, 'product1')", + [], + [], + state + ) + + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM products", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 1 + end + + test "NULL constraint violation handled gracefully", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE records (id INTEGER PRIMARY KEY, data TEXT NOT NULL)", + [], + [], + state + ) + + # Try to insert NULL + _error_result = + EctoLibSql.handle_execute( + "INSERT INTO records (id, data) VALUES (1, ?)", + [nil], + [], + state + ) + + # Connection still works + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO records (id, data) VALUES (2, 'valid_data')", + [], + [], + state + ) + + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM records", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 1 + end + + test "multiple sequential errors don't accumulate damage", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)", + [], + [], + state + ) + + # Execute multiple errors in sequence + _err1 = EctoLibSql.handle_execute("INVALID SQL", [], [], state) + _err2 = EctoLibSql.handle_execute("SELECT * FROM nonexistent", [], [], state) + _err3 = EctoLibSql.handle_execute("INSERT INTO test VALUES ()", [], [], state) + + # Connection should fully recover + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO test (id, value) VALUES (1, 'ok')", + [], + [], + state + ) + + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 1 + end + + test "batch operations with failures don't break connection", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE batch_test (id 
INTEGER PRIMARY KEY, value TEXT)", + [], + [], + state + ) + + # Batch with statements + statements = [ + {"INSERT INTO batch_test (id, value) VALUES (1, 'ok')", []}, + {"INSERT INTO batch_test (id, value) VALUES (2, 'also_ok')", []} + ] + + # Batch should execute + _batch_result = EctoLibSql.Native.batch(state, statements) + + # Connection still works for new operations + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO batch_test (id, value) VALUES (3, 'new')", + [], + [], + state + ) + + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM batch_test", + [], + [], + state + ) + + [[count]] = result.rows + assert count >= 1 + end + + test "savepoint error recovery", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE savepoint_test (id INTEGER PRIMARY KEY, value TEXT)", + [], + [], + state + ) + + # Begin transaction + {:ok, :begin, state} = EctoLibSql.handle_begin([], state) + + # First insert before savepoint + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO savepoint_test (id, value) VALUES (1, 'before')", + [], + [], + state + ) + + # Create savepoint (returns :ok) + :ok = EctoLibSql.Native.create_savepoint(state, "sp1") + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO savepoint_test (id, value) VALUES (2, 'inside')", + [], + [], + state + ) + + # Cause an error within savepoint + _error = EctoLibSql.handle_execute("SELEC * FROM savepoint_test", [], [], state) + + # Rollback to savepoint - only rolls back 'inside' insert, keeps 'before' + :ok = EctoLibSql.Native.rollback_to_savepoint_by_name(state, "sp1") + + # Should be able to continue transaction + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO savepoint_test (id, value) VALUES (3, 'after')", + [], + [], + state + ) + + # Commit + {:ok, _, state} = EctoLibSql.handle_commit([], state) + + # Should have 'before' and 'after', but not 'inside' + {:ok, _, result, _state} = + 
EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM savepoint_test", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 2 + end + + test "busy timeout is configured without breaking connection", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE lock_test (id INTEGER PRIMARY KEY, value TEXT)", + [], + [], + state + ) + + # Configure timeout + :ok = EctoLibSql.Native.busy_timeout(state, 1000) + + # Connection should still work + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO lock_test (id, value) VALUES (1, 'data')", + [], + [], + state + ) + + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM lock_test", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 1 + end + + test "connection resets properly without losing data", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "CREATE TABLE reset_test (id INTEGER PRIMARY KEY, value TEXT)", + [], + [], + state + ) + + # Insert data + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO reset_test (id, value) VALUES (1, 'data')", + [], + [], + state + ) + + # Cause an error + _error = EctoLibSql.handle_execute("SELECT * FROM nonexistent", [], [], state) + + # Reset connection state (returns :ok) + :ok = EctoLibSql.Native.reset(state) + + # Data should still be there after reset + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM reset_test", + [], + [], + state + ) + + [[count]] = result.rows + assert count == 1 + end + end +end diff --git a/test/cte_test.exs b/test/cte_test.exs index 366fddc..37d6fa9 100644 --- a/test/cte_test.exs +++ b/test/cte_test.exs @@ -64,9 +64,7 @@ defmodule EctoLibSql.CTETest do """) on_exit(fn -> - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) :ok diff --git a/test/cursor_streaming_large_test.exs 
b/test/cursor_streaming_large_test.exs new file mode 100644 index 0000000..057ab81 --- /dev/null +++ b/test/cursor_streaming_large_test.exs @@ -0,0 +1,813 @@ +defmodule EctoLibSql.CursorStreamingLargeTest do + use ExUnit.Case + alias EctoLibSql + + # These tests verify that cursors can stream large datasets without + # loading all data into memory at once. They also test cursor lifecycle + # and batch size handling. + + setup do + {:ok, state} = EctoLibSql.connect(database: ":memory:") + conn_id = state.conn_id + + # Create a test table for large data + {:ok, _, _, state} = + EctoLibSql.handle_execute( + """ + CREATE TABLE large_data ( + id INTEGER PRIMARY KEY, + batch_id INTEGER, + sequence INTEGER, + value TEXT, + data BLOB + ) + """, + [], + [], + state + ) + + on_exit(fn -> + # Use conn_id to ensure we disconnect the correct connection + EctoLibSql.disconnect([], %EctoLibSql.State{conn_id: conn_id}) + end) + + {:ok, state: state} + end + + describe "cursor streaming with large datasets" do + test "stream 1000 rows without loading all into memory", %{state: state} do + # Insert 1000 test rows + state = insert_rows(state, 1, 1000, 1) + + query = %EctoLibSql.Query{statement: "SELECT * FROM large_data ORDER BY id"} + + # Declare cursor + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Fetch all rows in batches + row_count = fetch_all_rows(state, cursor, query, max_rows: 500) + assert row_count == 1000, "Should fetch exactly 1000 rows" + end + + test "stream 10K rows with different batch sizes", %{state: state} do + state = insert_rows(state, 1, 10_000, 1) + + query = %EctoLibSql.Query{statement: "SELECT id, value FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Fetch with batch size 1000 + row_count = fetch_all_rows(state, cursor, query, max_rows: 1000) + assert row_count == 10_000, "Should fetch exactly 10K rows" + end + + test "cursor respects max_rows 
batch size setting", %{state: state} do + state = insert_rows(state, 1, 5000, 1) + + query = %EctoLibSql.Query{statement: "SELECT * FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Track batch sizes + {:cont, result, state} = + EctoLibSql.handle_fetch(query, cursor, [max_rows: 100], state) + + # First batch should be at most 100 rows + assert result.num_rows <= 100, "First batch should respect max_rows=100" + + row_count = result.num_rows + fetch_remaining_rows(state, cursor, query, max_rows: 100) + assert row_count == 5000 + end + + test "cursor with WHERE clause filters on large dataset", %{state: state} do + # Insert rows with different batch_ids + state = insert_rows(state, 1, 5000, 1) + state = insert_rows(state, 5001, 10000, 2) + + query = %EctoLibSql.Query{ + statement: "SELECT * FROM large_data WHERE batch_id = 2 ORDER BY id" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + row_count = fetch_all_rows(state, cursor, query, max_rows: 500) + assert row_count == 5000, "Should fetch exactly 5000 filtered rows" + end + + test "cursor processes rows in order", %{state: state} do + state = insert_rows(state, 1, 1000, 1) + + query = %EctoLibSql.Query{statement: "SELECT id FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Collect all IDs and verify they're in order + ids = fetch_all_ids(state, cursor, query, max_rows: 100) + expected_ids = Enum.to_list(1..1000) + assert ids == expected_ids, "Rows should be in order" + end + + test "cursor with BLOB data handles binary correctly", %{state: state} do + # Create table with binary data + {:ok, _, _, state} = + EctoLibSql.handle_execute( + """ + CREATE TABLE binary_test ( + id INTEGER PRIMARY KEY, + data BLOB + ) + """, + [], + [], + state + ) + + # Insert 100 rows with 1KB binary data each + state = + Enum.reduce(1..100, state, fn i, 
acc_state -> + binary_data = <> <> :binary.copy(<<0xFF>>, 1020) + + {:ok, _, _, new_state} = + EctoLibSql.handle_execute( + "INSERT INTO binary_test (id, data) VALUES (?, ?)", + [i, binary_data], + [], + acc_state + ) + + new_state + end) + + query = %EctoLibSql.Query{statement: "SELECT id, data FROM binary_test ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Verify binary data is preserved + binary_rows = fetch_all_binary_rows(state, cursor, query, max_rows: 25) + assert length(binary_rows) == 100 + + # Check first row's binary data + [first_id, first_data] = hd(binary_rows) + assert first_id == 1 + assert is_binary(first_data) + assert byte_size(first_data) == 1024 + end + + test "cursor with JOIN on large dataset", %{state: state} do + # Create another table for join + {:ok, _, _, state} = + EctoLibSql.handle_execute( + """ + CREATE TABLE categories ( + id INTEGER PRIMARY KEY, + name TEXT + ) + """, + [], + [], + state + ) + + # Insert categories + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO categories (id, name) VALUES (1, 'cat1'), (2, 'cat2')", + [], + [], + state + ) + + # Insert 5000 rows + state = insert_rows(state, 1, 5000, 1) + + query = %EctoLibSql.Query{ + statement: + "SELECT ld.id, ld.value, c.name FROM large_data ld LEFT JOIN categories c ON ld.batch_id = c.id ORDER BY ld.id" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + row_count = fetch_all_rows(state, cursor, query, max_rows: 500) + assert row_count == 5000 + end + + test "cursor with computed/derived columns", %{state: state} do + state = insert_rows(state, 1, 1000, 1) + + query = %EctoLibSql.Query{ + statement: + "SELECT id, value, LENGTH(value) as value_length, batch_id * 10 as scaled_batch FROM large_data ORDER BY id" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + rows = fetch_all_computed_rows(state, cursor, query, max_rows: 
100) + assert length(rows) == 1000 + + # Verify computed columns + [first_id, first_value, first_length, first_scaled] = hd(rows) + assert first_id == 1 + assert is_binary(first_value) + assert first_length == String.length(first_value) + # 1 * 10 + assert first_scaled == 10 + end + + test "cursor lifecycle: declare, fetch in batches, implicit close", %{state: state} do + state = insert_rows(state, 1, 1000, 1) + + query = %EctoLibSql.Query{statement: "SELECT * FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Fetch multiple batches + batch_count = count_batches(state, cursor, query, max_rows: 100) + + # Should have exactly 11 batches: 10 with 100 rows each, plus 1 final batch with 0 rows + assert batch_count == 11, "Should have exactly 11 batches for 1000 rows with batch size 100" + end + + test "cursor with aggregation query", %{state: state} do + state = insert_rows(state, 1, 1000, 1) + + query = %EctoLibSql.Query{statement: "SELECT COUNT(*) as count FROM large_data"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + {:cont, result, _state} = + EctoLibSql.handle_fetch(query, cursor, [max_rows: 100], state) + + [[count]] = result.rows + assert count == 1000 + end + + test "cursor with GROUP BY and aggregation", %{state: state} do + # Insert rows with different batch_ids + state = + Enum.reduce(1..5, state, fn batch, acc_state -> + insert_rows(acc_state, (batch - 1) * 2000 + 1, batch * 2000, batch) + end) + + query = %EctoLibSql.Query{ + statement: + "SELECT batch_id, COUNT(*) as count FROM large_data GROUP BY batch_id ORDER BY batch_id" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + rows = fetch_all_group_rows(state, cursor, query, max_rows: 10) + + # Should have 5 groups + assert length(rows) == 5 + + # Each group should have 2000 rows + Enum.each(rows, fn [_batch_id, count] -> + assert count == 2000 + end) + 
end + + test "cursor with OFFSET/LIMIT", %{state: state} do + state = insert_rows(state, 1, 1000, 1) + + query = %EctoLibSql.Query{ + statement: "SELECT id FROM large_data ORDER BY id LIMIT 100 OFFSET 500" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + ids = fetch_all_ids(state, cursor, query, max_rows: 50) + + # Should get rows 501-600 + assert length(ids) == 100 + assert hd(ids) == 501 + assert List.last(ids) == 600 + end + + test "cursor with DISTINCT", %{state: state} do + # Insert rows with repeating batch_ids (using different ID ranges) + state = insert_rows(state, 1, 100, 1) + state = insert_rows(state, 101, 200, 2) + state = insert_rows(state, 201, 300, 1) + state = insert_rows(state, 301, 400, 3) + state = insert_rows(state, 401, 500, 2) + state = insert_rows(state, 501, 600, 1) + + query = %EctoLibSql.Query{ + statement: "SELECT DISTINCT batch_id FROM large_data ORDER BY batch_id" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + rows = fetch_all_distinct_rows(state, cursor, query, max_rows: 10) + + # Should have 3 distinct batch_ids: 1, 2, 3 + assert length(rows) == 3 + assert List.flatten(rows) == [1, 2, 3] + end + end + + describe "cursor error handling and edge cases" do + test "handle_declare with malformed SQL returns error", %{state: state} do + query = %EctoLibSql.Query{statement: "SELEKT * FORM nonexistent_table"} + + result = EctoLibSql.handle_declare(query, [], [], state) + + # Should return an error tuple for invalid SQL + assert {:error, _reason, _state} = result + end + + test "handle_declare with syntax error in WHERE clause", %{state: state} do + query = %EctoLibSql.Query{ + statement: "SELECT * FROM large_data WHERE id = = 1" + } + + result = EctoLibSql.handle_declare(query, [], [], state) + + assert {:error, _reason, _state} = result + end + + test "handle_declare on non-existent table returns error", %{state: state} do + query = 
%EctoLibSql.Query{statement: "SELECT * FROM table_that_does_not_exist"} + + result = EctoLibSql.handle_declare(query, [], [], state) + + assert {:error, _reason, _state} = result + end + + test "empty result set returns 0 rows", %{state: state} do + # Table is empty, no rows inserted + query = %EctoLibSql.Query{statement: "SELECT * FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + row_count = fetch_all_rows(state, cursor, query, max_rows: 100) + assert row_count == 0, "Empty table should return 0 rows" + end + + test "cursor with WHERE clause matching no rows returns 0 rows", %{state: state} do + state = insert_rows(state, 1, 100, 1) + + query = %EctoLibSql.Query{ + statement: "SELECT * FROM large_data WHERE batch_id = 999 ORDER BY id" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + row_count = fetch_all_rows(state, cursor, query, max_rows: 50) + assert row_count == 0, "No matching rows should return 0" + end + + test "cursor with NULL values in data", %{state: state} do + # Insert rows with NULL values in the value column + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (1, 1, 1, NULL)", + [], + [], + state + ) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (2, 1, 2, 'not_null')", + [], + [], + state + ) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (3, 1, 3, NULL)", + [], + [], + state + ) + + query = %EctoLibSql.Query{statement: "SELECT id, value FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + rows = fetch_all_cursor_rows(state, cursor, query, max_rows: 10) + + assert length(rows) == 3 + # Verify NULL values are preserved + [[1, val1], [2, val2], [3, val3]] = rows + 
assert val1 == nil + assert val2 == "not_null" + assert val3 == nil + end + + test "cursor with empty string values", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (1, 1, 1, '')", + [], + [], + state + ) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (2, 1, 2, 'non_empty')", + [], + [], + state + ) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (3, 1, 3, '')", + [], + [], + state + ) + + query = %EctoLibSql.Query{statement: "SELECT id, value FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + rows = fetch_all_cursor_rows(state, cursor, query, max_rows: 10) + + assert length(rows) == 3 + [[1, val1], [2, val2], [3, val3]] = rows + assert val1 == "" + assert val2 == "non_empty" + assert val3 == "" + end + + test "cursor with mixed NULL and empty string values", %{state: state} do + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (1, 1, 1, NULL)", + [], + [], + state + ) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (2, 1, 2, '')", + [], + [], + state + ) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (3, 1, 3, 'value')", + [], + [], + state + ) + + query = %EctoLibSql.Query{ + statement: "SELECT id, value, value IS NULL as is_null FROM large_data ORDER BY id" + } + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + rows = fetch_all_cursor_rows(state, cursor, query, max_rows: 10) + + assert length(rows) == 3 + # SQLite returns 1 for true, 0 for false + [[1, nil, 1], [2, "", 0], [3, "value", 0]] = rows + end + end + + describe 
"cursor transaction behaviour" do + test "cursor declared in transaction fails after rollback", %{state: state} do + state = insert_rows(state, 1, 100, 1) + + # Begin transaction + {:ok, :begin, state} = EctoLibSql.handle_begin([], state) + + query = %EctoLibSql.Query{statement: "SELECT * FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Fetch some rows within transaction + {:cont, result, state} = + EctoLibSql.handle_fetch(query, cursor, [max_rows: 10], state) + + assert result.num_rows == 10 + + # Rollback the transaction + {:ok, _result, state} = EctoLibSql.handle_rollback([], state) + + # After rollback, fetching from the cursor should fail or return empty + # The cursor may be invalidated depending on implementation + fetch_result = EctoLibSql.handle_fetch(query, cursor, [max_rows: 10], state) + + case fetch_result do + {:error, _reason, _state} -> + # Expected: cursor invalidated after rollback + :ok + + {:halt, result, _state} -> + # Also acceptable: cursor exhausted/closed + assert result.num_rows == 0 + + {:cont, result, _state} -> + # If cursor continues, it should still work but this is less expected + assert is_integer(result.num_rows) + end + end + + test "cursor sees uncommitted changes within same transaction", %{state: state} do + # Begin transaction + {:ok, :begin, state} = EctoLibSql.handle_begin([], state) + + # Insert rows within transaction + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (1, 1, 1, 'trx_row')", + [], + [], + state + ) + + query = %EctoLibSql.Query{statement: "SELECT * FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + row_count = fetch_all_rows(state, cursor, query, max_rows: 10) + + # Should see the uncommitted row + assert row_count == 1 + + # Rollback + {:ok, _result, _state} = EctoLibSql.handle_rollback([], state) + end 
+ end + + describe "concurrent cursor operations" do + test "multiple cursors on different queries return correct results", %{state: state} do + # Insert data for two different batch_ids + state = insert_rows(state, 1, 500, 1) + state = insert_rows(state, 501, 1000, 2) + + query1 = %EctoLibSql.Query{ + statement: "SELECT id FROM large_data WHERE batch_id = 1 ORDER BY id" + } + + query2 = %EctoLibSql.Query{ + statement: "SELECT id FROM large_data WHERE batch_id = 2 ORDER BY id" + } + + # Declare both cursors + {:ok, ^query1, cursor1, state} = + EctoLibSql.handle_declare(query1, [], [], state) + + {:ok, ^query2, cursor2, state} = + EctoLibSql.handle_declare(query2, [], [], state) + + # Interleave fetches from both cursors + {:cont, result1_a, state} = + EctoLibSql.handle_fetch(query1, cursor1, [max_rows: 100], state) + + {:cont, result2_a, state} = + EctoLibSql.handle_fetch(query2, cursor2, [max_rows: 100], state) + + {:cont, result1_b, state} = + EctoLibSql.handle_fetch(query1, cursor1, [max_rows: 100], state) + + {:cont, result2_b, _state} = + EctoLibSql.handle_fetch(query2, cursor2, [max_rows: 100], state) + + # Verify cursor1 returns batch_id=1 rows (ids 1-500) + cursor1_ids = + Enum.map(result1_a.rows ++ result1_b.rows, fn [id] -> id end) + + assert Enum.all?(cursor1_ids, fn id -> id >= 1 and id <= 500 end) + + # Verify cursor2 returns batch_id=2 rows (ids 501-1000) + cursor2_ids = + Enum.map(result2_a.rows ++ result2_b.rows, fn [id] -> id end) + + assert Enum.all?(cursor2_ids, fn id -> id >= 501 and id <= 1000 end) + + # Verify ordering within each cursor + assert cursor1_ids == Enum.sort(cursor1_ids) + assert cursor2_ids == Enum.sort(cursor2_ids) + end + + test "concurrent tasks with separate cursors", %{state: state} do + state = insert_rows(state, 1, 1000, 1) + + # Use the state's conn_id to create separate connections for each task + # Since we're using in-memory DB, we need to share the same connection + # but use different cursors + + query_even = 
%EctoLibSql.Query{ + statement: "SELECT id FROM large_data WHERE id % 2 = 0 ORDER BY id" + } + + query_odd = %EctoLibSql.Query{ + statement: "SELECT id FROM large_data WHERE id % 2 = 1 ORDER BY id" + } + + # Declare cursors + {:ok, ^query_even, cursor_even, state} = + EctoLibSql.handle_declare(query_even, [], [], state) + + {:ok, ^query_odd, cursor_odd, state} = + EctoLibSql.handle_declare(query_odd, [], [], state) + + # Fetch all from each cursor + even_ids = fetch_all_ids(state, cursor_even, query_even, max_rows: 100) + odd_ids = fetch_all_ids(state, cursor_odd, query_odd, max_rows: 100) + + # Verify counts + assert length(even_ids) == 500 + assert length(odd_ids) == 500 + + # Verify all even ids are even + assert Enum.all?(even_ids, fn id -> rem(id, 2) == 0 end) + + # Verify all odd ids are odd + assert Enum.all?(odd_ids, fn id -> rem(id, 2) == 1 end) + end + + test "cursor isolation: modifying data doesn't affect active cursor", %{state: state} do + state = insert_rows(state, 1, 100, 1) + + query = %EctoLibSql.Query{statement: "SELECT id FROM large_data ORDER BY id"} + + {:ok, ^query, cursor, state} = + EctoLibSql.handle_declare(query, [], [], state) + + # Fetch first batch + {:cont, result1, state} = + EctoLibSql.handle_fetch(query, cursor, [max_rows: 50], state) + + first_batch_ids = Enum.map(result1.rows, fn [id] -> id end) + assert length(first_batch_ids) == 50 + + # Insert more rows while cursor is active + state = insert_rows(state, 101, 200, 2) + + # Fetch remaining rows from cursor + remaining_count = fetch_remaining_rows(state, cursor, query, max_rows: 50) + + # Cursor should only see original 100 rows (or implementation may vary) + # Total fetched should be at least 100 (the original rows) + total_fetched = 50 + remaining_count + assert total_fetched >= 50, "Should fetch remaining original rows" + end + end + + # ============================================================================ + # HELPER FUNCTIONS + # 
============================================================================ + + defp insert_rows(state, start_id, end_id, batch_id) do + # Use a prepared statement to reduce overhead per insert + {:ok, stmt} = + EctoLibSql.Native.prepare( + state, + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (?, ?, ?, ?)" + ) + + try do + Enum.reduce(start_id..end_id, state, fn id, acc_state -> + value = "value_#{id}_batch_#{batch_id}" + + {:ok, _changes} = + EctoLibSql.Native.execute_stmt( + acc_state, + stmt, + "INSERT INTO large_data (id, batch_id, sequence, value) VALUES (?, ?, ?, ?)", + [id, batch_id, id - start_id + 1, value] + ) + + acc_state + end) + after + # Always clean up prepared statement, even on error + EctoLibSql.Native.close_stmt(stmt) + end + end + + defp fetch_all_rows(state, cursor, query, opts) do + case EctoLibSql.handle_fetch(query, cursor, opts, state) do + {:cont, result, next_state} -> + result.num_rows + fetch_all_rows(next_state, cursor, query, opts) + + {:halt, result, _state} -> + result.num_rows + + {:error, reason, _state} -> + flunk("Cursor fetch failed with error: #{inspect(reason)}") + end + end + + defp fetch_remaining_rows(state, cursor, query, opts) do + case EctoLibSql.handle_fetch(query, cursor, opts, state) do + {:cont, result, next_state} -> + result.num_rows + fetch_remaining_rows(next_state, cursor, query, opts) + + {:halt, result, _state} -> + result.num_rows + + {:error, reason, _state} -> + flunk("Cursor fetch failed with error: #{inspect(reason)}") + end + end + + defp fetch_all_ids(state, cursor, query, opts) do + # Use accumulator to avoid O(n²) list concatenation. + # Collect batches in reverse order, then concat for single-level flattening. 
+ fetch_all_ids_acc(state, cursor, query, opts, []) + |> Enum.reverse() + |> Enum.concat() + end + + defp fetch_all_ids_acc(state, cursor, query, opts, acc) do + case EctoLibSql.handle_fetch(query, cursor, opts, state) do + {:cont, result, next_state} -> + ids = Enum.map(result.rows, fn [id] -> id end) + # Collect batches as nested lists to avoid intermediate reversals + fetch_all_ids_acc(next_state, cursor, query, opts, [ids | acc]) + + {:halt, result, _state} -> + ids = Enum.map(result.rows, fn [id] -> id end) + [ids | acc] + + {:error, reason, _state} -> + flunk("Cursor fetch failed in fetch_all_ids_acc: #{inspect(reason)}") + end + end + + # Generic helper to collect all rows from a cursor by repeatedly fetching batches + # Uses nested list pattern to avoid O(n²) list concatenation with ++ + defp fetch_all_cursor_rows(state, cursor, query, opts) do + fetch_all_cursor_rows_acc(state, cursor, query, opts, []) + |> Enum.reverse() + |> Enum.concat() + end + + defp fetch_all_cursor_rows_acc(state, cursor, query, opts, acc) do + case EctoLibSql.handle_fetch(query, cursor, opts, state) do + {:cont, result, next_state} -> + # Collect batches as nested lists to avoid intermediate reversals + fetch_all_cursor_rows_acc(next_state, cursor, query, opts, [result.rows | acc]) + + {:halt, result, _state} -> + [result.rows | acc] + + {:error, reason, _state} -> + flunk("Cursor fetch failed in fetch_all_cursor_rows_acc: #{inspect(reason)}") + end + end + + # Aliases for backwards compatibility and semantic clarity + defp fetch_all_binary_rows(state, cursor, query, opts) do + fetch_all_cursor_rows(state, cursor, query, opts) + end + + defp fetch_all_computed_rows(state, cursor, query, opts) do + fetch_all_cursor_rows(state, cursor, query, opts) + end + + defp fetch_all_group_rows(state, cursor, query, opts) do + fetch_all_cursor_rows(state, cursor, query, opts) + end + + defp fetch_all_distinct_rows(state, cursor, query, opts) do + fetch_all_cursor_rows(state, cursor, query, 
opts) + end + + defp count_batches(state, cursor, query, opts) do + case EctoLibSql.handle_fetch(query, cursor, opts, state) do + {:cont, _result, next_state} -> + 1 + count_batches(next_state, cursor, query, opts) + + {:halt, _result, _state} -> + 1 + + {:error, reason, _state} -> + flunk("Cursor fetch failed in count_batches: #{inspect(reason)}") + end + end +end diff --git a/test/ecto_adapter_test.exs b/test/ecto_adapter_test.exs index 43a2ef3..bf17d0d 100644 --- a/test/ecto_adapter_test.exs +++ b/test/ecto_adapter_test.exs @@ -7,12 +7,10 @@ defmodule Ecto.Adapters.LibSqlTest do setup do # Clean up any existing test database - File.rm(@test_db) + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) on_exit(fn -> - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) :ok diff --git a/test/ecto_integration_test.exs b/test/ecto_integration_test.exs index b18db73..94fa585 100644 --- a/test/ecto_integration_test.exs +++ b/test/ecto_integration_test.exs @@ -94,9 +94,7 @@ defmodule Ecto.Integration.EctoLibSqlTest do """) on_exit(fn -> - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) :ok diff --git a/test/ecto_libsql_test.exs b/test/ecto_libsql_test.exs deleted file mode 100644 index a303032..0000000 --- a/test/ecto_libsql_test.exs +++ /dev/null @@ -1,681 +0,0 @@ -defmodule EctoLibSqlTest do - use ExUnit.Case - doctest EctoLibSql - - setup_all do - # Clean up any existing test database from previous runs - File.rm("z_ecto_libsql_test-bar.db") - File.rm("z_ecto_libsql_test-bar.db-shm") - File.rm("z_ecto_libsql_test-bar.db-wal") - - on_exit(fn -> - # Clean up bar.db at end of all tests too - File.rm("z_ecto_libsql_test-bar.db") - File.rm("z_ecto_libsql_test-bar.db-shm") - File.rm("z_ecto_libsql_test-bar.db-wal") - end) - - :ok - end - - setup do - # Create a unique database file for each test to ensure 
isolation - test_db = "z_ecto_libsql_test-#{:erlang.unique_integer([:positive])}.db" - - opts = [ - uri: System.get_env("LIBSQL_URI"), - auth_token: System.get_env("LIBSQL_TOKEN"), - database: test_db, - # sync is optional - sync: true - ] - - # Clean up database file after test completes - on_exit(fn -> - File.rm(test_db) - File.rm(test_db <> "-shm") - File.rm(test_db <> "-wal") - end) - - {:ok, opts: opts} - end - - test "connection remote replica", state do - assert {:ok, _state} = EctoLibSql.connect(state[:opts]) - end - - test "ping connection", state do - {:ok, conn} = EctoLibSql.connect(state[:opts]) - assert {:ok, _ping_state} = EctoLibSql.ping(conn) - end - - test "prepare and execute a simple select", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - query = %EctoLibSql.Query{statement: "SELECT 1 + 1"} - res_execute = EctoLibSql.handle_execute(query, [], [], state) - assert {:ok, _query, _result, _state} = res_execute - end - - test "create table", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - query = %EctoLibSql.Query{ - statement: - "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, email TEXT)" - } - - assert {:ok, _query, _result, _state} = EctoLibSql.handle_execute(query, [], [], state) - end - - test "transaction and param", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - # trx_id here - {:ok, _begin_result, new_state} = EctoLibSql.handle_begin([], state) - - query = %EctoLibSql.Query{statement: "INSERT INTO users (name, email) values (?1, ?2)"} - param = ["foo", "bar@mail.com"] - - _exec = - EctoLibSql.handle_execute( - query, - param, - [], - new_state - ) - - commit = EctoLibSql.handle_commit([], new_state) - # handle_commit return :ok, result, and new_state - assert {:ok, _commit_result, _committed_state} = commit - end - - # passed - test "vector", state do - query = "CREATE TABLE IF NOT EXISTS movies ( title TEXT, year INT, embedding F32_BLOB(3) -);" - {:ok, conn} = 
EctoLibSql.connect(state[:opts]) - - EctoLibSql.handle_execute(%EctoLibSql.Query{statement: query}, [], [], conn) - - insert = - " INSERT INTO movies (title, year, embedding) VALUES ('Napoleon', 2023, vector('[1,2,3]')), ('Black Hawk Down', 2001, vector('[10,11,12]')), ('Gladiator', 2000, vector('[7,8,9]')), ('Blade Runner', 1982, vector('[4,5,6]'));" - - EctoLibSql.handle_execute(%EctoLibSql.Query{statement: insert}, [], [], conn) - - select = - "SELECT * FROM movies WHERE year >= 2020 ORDER BY vector_distance_cos(embedding, '[3,1,2]') LIMIT 3;" - - res_query = EctoLibSql.handle_execute(%EctoLibSql.Query{statement: select}, [], [], conn) - - assert {:ok, _query, _result, _state} = res_query - end - - test "disconnect", state do - opts = state[:opts] - {:ok, conn} = EctoLibSql.connect(opts) - - dis = EctoLibSql.disconnect([], conn) - assert :ok == dis - end - - test "handle invalid SQL statement", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - query = %EctoLibSql.Query{statement: "SELECT * FROM not_existing_table"} - - assert {:error, %EctoLibSql.Error{}, _state} = EctoLibSql.handle_execute(query, [], [], state) - end - - # libSQL supports multiple statements in one execution - test "multiple statements in one execution", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - # Create table first - create_table = %EctoLibSql.Query{ - statement: - "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, email TEXT)" - } - - {:ok, _query, _result, state} = EctoLibSql.handle_execute(create_table, [], [], state) - - query = %EctoLibSql.Query{ - statement: """ - INSERT INTO users (name, email) VALUES ('multi', 'multi@mail.com'); - SELECT * FROM users WHERE name = 'multi'; - """ - } - - # libSQL now supports multiple statements, so this should succeed - assert {:ok, _query, _result, _state} = EctoLibSql.handle_execute(query, [], [], state) - end - - test "select with parameter", state do - {:ok, state} = 
EctoLibSql.connect(state[:opts]) - - query = %EctoLibSql.Query{ - statement: "SELECT ?1 + ?2" - } - - assert {:ok, _query, result, _state} = EctoLibSql.handle_execute(query, [10, 5], [], state) - assert result.rows == [[15]] - end - - test "local no sync", _state do - local = [ - database: "z_ecto_libsql_test-bar.db" - ] - - {:ok, state} = EctoLibSql.connect(local) - - create_table = %EctoLibSql.Query{ - statement: - "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, email TEXT)" - } - - {:ok, _query, _result, state} = EctoLibSql.handle_execute(create_table, [], [], state) - - query = %EctoLibSql.Query{statement: "INSERT INTO users (name, email) values (?1, ?2)"} - - params = ["danawanb", "nosync@gmail.com"] - res_execute = EctoLibSql.handle_execute(query, params, [], state) - - assert {:ok, _query, _result, _state} = res_execute - - # Skip remote connection test if env vars are not set - if System.get_env("LIBSQL_URI") && System.get_env("LIBSQL_TOKEN") do - remote_only = [ - uri: System.get_env("LIBSQL_URI"), - auth_token: System.get_env("LIBSQL_TOKEN") - ] - - {:ok, remote_state} = EctoLibSql.connect(remote_only) - - query_select = "SELECT * FROM users WHERE email = ? 
LIMIT 1" - - select_execute = - EctoLibSql.handle_execute(query_select, ["nosync@gmail.com"], [], remote_state) - - assert {:ok, _query, result, _state} = select_execute - assert %EctoLibSql.Result{command: :select, columns: [], rows: [], num_rows: 0} = result - end - end - - test "manual sync", _state do - local = [ - database: "z_ecto_libsql_test-bar.db" - ] - - {:ok, state} = EctoLibSql.connect(local) - - create_table = %EctoLibSql.Query{ - statement: - "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, email TEXT)" - } - - {:ok, _query, _result, state} = EctoLibSql.handle_execute(create_table, [], [], state) - - query = %EctoLibSql.Query{statement: "INSERT INTO users (name, email) values (?1, ?2)"} - - params = ["danawanb", "manualsync@gmail.com"] - res_execute = EctoLibSql.handle_execute(query, params, [], state) - - assert {:ok, _query, _result, _state} = res_execute - - remote_only = [ - uri: System.get_env("LIBSQL_URI"), - auth_token: System.get_env("LIBSQL_TOKEN"), - database: "z_ecto_libsql_test-bar.db" - ] - - {:ok, remote_state} = EctoLibSql.connect(remote_only) - - syncx = EctoLibSql.Native.sync(remote_state) - - query_select = "SELECT * FROM users WHERE email = ? 
LIMIT 1" - assert {:ok, "success sync"} = syncx - - select_execute = - EctoLibSql.handle_execute(query_select, ["manualsync@gmail.com"], [], remote_state) - - assert {:ok, _query, _result, _state} = select_execute - end - - test "transaction behaviours - deferred and read_only", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - # Test DEFERRED (default) - {:ok, deferred_state} = EctoLibSql.Native.begin(state, behavior: :deferred) - assert deferred_state.trx_id != nil - {:ok, _rolled_back_state} = EctoLibSql.Native.rollback(deferred_state) - - # Test READ_ONLY - {:ok, readonly_state} = EctoLibSql.Native.begin(state, behavior: :read_only) - assert readonly_state.trx_id != nil - {:ok, _rolled_back_state} = EctoLibSql.Native.rollback(readonly_state) - end - - test "metadata functions - last_insert_rowid and changes", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - # Create table - create_table = %EctoLibSql.Query{ - statement: - "CREATE TABLE IF NOT EXISTS metadata_test (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)" - } - - {:ok, _query, _result, state} = EctoLibSql.handle_execute(create_table, [], [], state) - - # Insert and check rowid - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "INSERT INTO metadata_test (name) VALUES (?)", - ["First"], - [], - state - ) - - rowid1 = EctoLibSql.Native.get_last_insert_rowid(state) - changes1 = EctoLibSql.Native.get_changes(state) - - assert is_integer(rowid1) - assert changes1 == 1 - - # Insert another - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "INSERT INTO metadata_test (name) VALUES (?)", - ["Second"], - [], - state - ) - - rowid2 = EctoLibSql.Native.get_last_insert_rowid(state) - assert rowid2 > rowid1 - - # Update multiple rows - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "UPDATE metadata_test SET name = ? 
WHERE id <= ?", - ["Updated", rowid2], - [], - state - ) - - changes_update = EctoLibSql.Native.get_changes(state) - assert changes_update == 2 - - # Check total changes - total = EctoLibSql.Native.get_total_changes(state) - # At least 2 inserts + 2 updates - assert total >= 4 - end - - test "is_autocommit check", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - # Should be in autocommit mode initially - assert EctoLibSql.Native.get_is_autocommit(state) == true - - # Start transaction - {:ok, :begin, trx_state} = EctoLibSql.handle_begin([], state) - - # Should not be in autocommit during transaction - assert EctoLibSql.Native.get_is_autocommit(trx_state) == false - - # Commit transaction - {:ok, _commit_result, committed_state} = EctoLibSql.handle_commit([], trx_state) - - # Should be back in autocommit mode - assert EctoLibSql.Native.get_is_autocommit(committed_state) == true - end - - test "vector helpers - vector_type and vector_distance_cos", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - # Test vector_type helper - f32_type = EctoLibSql.Native.vector_type(128, :f32) - assert f32_type == "F32_BLOB(128)" - - f64_type = EctoLibSql.Native.vector_type(256, :f64) - assert f64_type == "F64_BLOB(256)" - - # Create table with vector column using helper - vector_col = EctoLibSql.Native.vector_type(3, :f32) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE IF NOT EXISTS embeddings (id INTEGER PRIMARY KEY, vec #{vector_col})", - [], - [], - state - ) - - # Test vector helper - vec1 = EctoLibSql.Native.vector([1.0, 2.0, 3.0]) - assert vec1 == "[1.0,2.0,3.0]" - - vec2 = EctoLibSql.Native.vector([4, 5, 6]) - assert vec2 == "[4,5,6]" - - # Insert vectors - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "INSERT INTO embeddings (id, vec) VALUES (?, vector(?))", - [1, vec1], - [], - state - ) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "INSERT INTO embeddings (id, vec) VALUES (?, 
vector(?))", - [2, vec2], - [], - state - ) - - # Test vector_distance_cos helper - distance_sql = EctoLibSql.Native.vector_distance_cos("vec", [1.5, 2.5, 3.5]) - assert String.contains?(distance_sql, "vector_distance_cos") - assert String.contains?(distance_sql, "vec") - - # Use in query - {:ok, _query, result, _state} = - EctoLibSql.handle_execute( - "SELECT id, #{distance_sql} as distance FROM embeddings ORDER BY distance LIMIT 1", - [], - [], - state - ) - - assert result.num_rows == 1 - end - - test "JSON data storage", state do - {:ok, state} = EctoLibSql.connect(state[:opts]) - - # Create table for JSON-like data - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE IF NOT EXISTS json_test (id INTEGER PRIMARY KEY, data TEXT)", - [], - [], - state - ) - - # Store JSON-encoded data - json_data = Jason.encode!(%{name: "Alice", age: 30, tags: ["developer", "elixir"]}) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "INSERT INTO json_test (data) VALUES (?)", - [json_data], - [], - state - ) - - # Retrieve and decode - {:ok, _query, result, _state} = - EctoLibSql.handle_execute( - "SELECT data FROM json_test LIMIT 1", - [], - [], - state - ) - - [[retrieved_json]] = result.rows - decoded = Jason.decode!(retrieved_json) - - assert decoded["name"] == "Alice" - assert decoded["age"] == 30 - assert "developer" in decoded["tags"] - end - - describe "encryption" do - @encryption_key "this-is-a-test-encryption-key-with-32-plus-characters" - - test "local database with encryption" do - # Create encrypted database - {:ok, state} = - EctoLibSql.connect( - database: "z_ecto_libsql_test-encrypted.db", - encryption_key: @encryption_key - ) - - # Create table and insert data - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE IF NOT EXISTS secure_data (id INTEGER PRIMARY KEY, secret TEXT)", - [], - [], - state - ) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "INSERT INTO secure_data 
(secret) VALUES (?)", - ["top secret information"], - [], - state - ) - - # Query the data back - {:ok, _query, result, _state} = - EctoLibSql.handle_execute( - "SELECT secret FROM secure_data WHERE id = 1", - [], - [], - state - ) - - assert result.rows == [["top secret information"]] - - # Disconnect - EctoLibSql.disconnect([], state) - - # Verify we can reconnect with the same key - {:ok, state2} = - EctoLibSql.connect( - database: "z_ecto_libsql_test-encrypted.db", - encryption_key: @encryption_key - ) - - {:ok, _query, result2, _state2} = - EctoLibSql.handle_execute( - "SELECT secret FROM secure_data WHERE id = 1", - [], - [], - state2 - ) - - assert result2.rows == [["top secret information"]] - - EctoLibSql.disconnect([], state2) - - # Clean up - File.rm("z_ecto_libsql_test-encrypted.db") - File.rm("z_ecto_libsql_test-encrypted.db-shm") - File.rm("z_ecto_libsql_test-encrypted.db-wal") - end - - test "cannot open encrypted database without key" do - # Create encrypted database - {:ok, state} = - EctoLibSql.connect( - database: "z_ecto_libsql_test-encrypted2.db", - encryption_key: @encryption_key - ) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE IF NOT EXISTS data (id INTEGER PRIMARY KEY)", - [], - [], - state - ) - - EctoLibSql.disconnect([], state) - - # Try to open without encryption key - should fail or give errors - case EctoLibSql.connect(database: "z_ecto_libsql_test-encrypted2.db") do - {:ok, state_no_key} -> - # If it connects, queries should fail - result = - EctoLibSql.handle_execute( - "SELECT * FROM data", - [], - [], - state_no_key - ) - - # Should get an error - assert match?({:error, _, _}, result) - EctoLibSql.disconnect([], state_no_key) - - {:error, _reason} -> - # Connection itself might fail, which is also acceptable - :ok - end - - # Clean up - File.rm("z_ecto_libsql_test-encrypted2.db") - File.rm("z_ecto_libsql_test-encrypted2.db-shm") - File.rm("z_ecto_libsql_test-encrypted2.db-wal") - end - - test 
"cannot open encrypted database with wrong key" do - # Create encrypted database - {:ok, state} = - EctoLibSql.connect( - database: "z_ecto_libsql_test-encrypted3.db", - encryption_key: @encryption_key - ) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE IF NOT EXISTS data (id INTEGER PRIMARY KEY, value TEXT)", - [], - [], - state - ) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "INSERT INTO data (value) VALUES (?)", - ["secret"], - [], - state - ) - - EctoLibSql.disconnect([], state) - - # Try to open with wrong encryption key - wrong_key = "wrong-encryption-key-that-is-also-32-characters-long" - - case EctoLibSql.connect( - database: "z_ecto_libsql_test-encrypted3.db", - encryption_key: wrong_key - ) do - {:ok, state_wrong} -> - # If it connects, queries should fail or return garbage - result = - EctoLibSql.handle_execute( - "SELECT value FROM data", - [], - [], - state_wrong - ) - - # Should either error or return corrupted data - case result do - {:error, _reason, _state} -> - :ok - - {:ok, _query, result_data, _final_state} -> - # Data should not match the original - refute result_data.rows == [["secret"]] - end - - EctoLibSql.disconnect([], state_wrong) - - {:error, _reason} -> - # Connection might fail, which is acceptable - :ok - end - - # Clean up - File.rm("z_ecto_libsql_test-encrypted3.db") - File.rm("z_ecto_libsql_test-encrypted3.db-shm") - File.rm("z_ecto_libsql_test-encrypted3.db-wal") - end - - test "encrypted database file does not contain plaintext" do - secret_text = "this-should-not-be-readable-in-file" - - # Create encrypted database with sensitive data - {:ok, state} = - EctoLibSql.connect( - database: "z_ecto_libsql_test-encrypted4.db", - encryption_key: @encryption_key - ) - - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE IF NOT EXISTS secrets (id INTEGER PRIMARY KEY, data TEXT)", - [], - [], - state - ) - - {:ok, _query, _result, state} = - 
EctoLibSql.handle_execute( - "INSERT INTO secrets (data) VALUES (?)", - [secret_text], - [], - state - ) - - EctoLibSql.disconnect([], state) - - # Read the raw database file and verify secret text is NOT in plaintext - raw_content = File.read!("z_ecto_libsql_test-encrypted4.db") - - # The secret text should NOT appear in plaintext in the file - refute String.contains?(raw_content, secret_text), - "Secret text '#{secret_text}' found in plaintext in encrypted database file!" - - # Also check that the file doesn't start with SQLite header (sign of unencrypted SQLite) - # Encrypted databases should have different file structure - <> = raw_content - - # Standard SQLite header is "SQLite format 3\0" - refute String.starts_with?(first_bytes, "SQLite format 3"), - "Database file has standard SQLite header - may not be encrypted!" - - # Verify we can still read with correct key - {:ok, state2} = - EctoLibSql.connect( - database: "z_ecto_libsql_test-encrypted4.db", - encryption_key: @encryption_key - ) - - {:ok, _query, result, _} = - EctoLibSql.handle_execute( - "SELECT data FROM secrets WHERE id = 1", - [], - [], - state2 - ) - - assert result.rows == [[secret_text]] - - EctoLibSql.disconnect([], state2) - - # Clean up - File.rm("z_ecto_libsql_test-encrypted4.db") - File.rm("z_ecto_libsql_test-encrypted4.db-shm") - File.rm("z_ecto_libsql_test-encrypted4.db-wal") - end - end -end diff --git a/test/ecto_migration_test.exs b/test/ecto_migration_test.exs index 9d32ce1..dc7afeb 100644 --- a/test/ecto_migration_test.exs +++ b/test/ecto_migration_test.exs @@ -25,10 +25,7 @@ defmodule Ecto.Adapters.LibSql.MigrationTest do # Small delay to ensure file handles are released. Process.sleep(10) - File.rm(test_db) - File.rm(test_db <> "-shm") - File.rm(test_db <> "-wal") - File.rm(test_db <> "-journal") + EctoLibSql.TestHelpers.cleanup_db_files(test_db) end) # Foreign keys are disabled by default in SQLite - tests that need them will enable them explicitly. 
diff --git a/test/ecto_sql_compatibility_test.exs b/test/ecto_sql_compatibility_test.exs index d7e5ba7..8c82a3e 100644 --- a/test/ecto_sql_compatibility_test.exs +++ b/test/ecto_sql_compatibility_test.exs @@ -52,9 +52,7 @@ defmodule EctoLibSql.EctoSqlCompatibilityTest do """) on_exit(fn -> - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) :ok @@ -85,11 +83,9 @@ defmodule EctoLibSql.EctoSqlCompatibilityTest do assert hd(result) == datetime end - @tag :skip + # SQLite doesn't preserve type information in schemaless queries like PostgreSQL does + @tag :sqlite_limitation test "fragmented schemaless types" do - # NOTE: This test is skipped because schemaless type() queries don't work - # the same way in LibSQL as they do in PostgreSQL. - # In SQLite, type information is not preserved in schemaless queries. TestRepo.insert!(%Post{visits: 123}) result = diff --git a/test/ecto_sql_transaction_compat_test.exs b/test/ecto_sql_transaction_compat_test.exs index 3fc67b4..0ed0dcd 100644 --- a/test/ecto_sql_transaction_compat_test.exs +++ b/test/ecto_sql_transaction_compat_test.exs @@ -95,9 +95,7 @@ defmodule EctoLibSql.EctoSqlTransactionCompatTest do Process.sleep(@cleanup_delay_ms) # Clean up all database files (ignore errors if files don't exist) - File.rm(test_db) - File.rm("#{test_db}-shm") - File.rm("#{test_db}-wal") + EctoLibSql.TestHelpers.cleanup_db_files(test_db) end) :ok @@ -217,8 +215,8 @@ defmodule EctoLibSql.EctoSqlTransactionCompatTest do end describe "transaction isolation" do - @tag :skip - @tag :sqlite_concurrency_limitation + # SQLite uses file-level locking, not PostgreSQL-style row-level locking + @tag :sqlite_limitation test "rollback is per repository connection" do message = "cannot call rollback outside of transaction" @@ -227,8 +225,8 @@ defmodule EctoLibSql.EctoSqlTransactionCompatTest do end end - @tag :skip - @tag :sqlite_concurrency_limitation + # SQLite uses 
file-level locking, not PostgreSQL-style row-level locking + @tag :sqlite_limitation test "transactions are not shared across processes" do pid = self() diff --git a/test/ecto_stream_compat_test.exs b/test/ecto_stream_compat_test.exs index 9e06e10..d23e883 100644 --- a/test/ecto_stream_compat_test.exs +++ b/test/ecto_stream_compat_test.exs @@ -71,9 +71,7 @@ defmodule EctoLibSql.EctoStreamCompatTest do """) on_exit(fn -> - File.rm(@test_db) - File.rm(@test_db <> "-wal") - File.rm(@test_db <> "-shm") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) :ok diff --git a/test/error_demo_test.exs b/test/error_demo_test.exs deleted file mode 100644 index d8941a3..0000000 --- a/test/error_demo_test.exs +++ /dev/null @@ -1,146 +0,0 @@ -defmodule EctoLibSql.ErrorDemoTest do - use ExUnit.Case - - @moduledoc """ - Simple demonstration tests showing that errors are now handled gracefully - instead of crashing the BEAM VM. - - BEFORE the refactoring: These operations would call .unwrap() on None/Err - values in Rust, causing the entire BEAM VM to panic and crash. - - AFTER the refactoring: Errors are returned as {:error, message} tuples - to Elixir, where they can be handled by supervision trees. 
- """ - - describe "graceful error handling demonstrations" do - test "❌ BEFORE: invalid connection would crash VM | ✅ AFTER: returns error tuple" do - # This connection ID doesn't exist in the registry - fake_conn_id = "00000000-0000-0000-0000-000000000000" - - # BEFORE: Rust would call CONNECTION_REGISTRY.lock().unwrap().get(id).unwrap() - # Second unwrap() would panic → VM crash - # AFTER: Returns {:error, "Invalid connection ID"} - result = EctoLibSql.Native.ping(fake_conn_id) - - assert {:error, error_msg} = result - assert is_binary(error_msg) - end - - test "❌ BEFORE: invalid transaction would crash VM | ✅ AFTER: returns error tuple" do - fake_trx_id = "nonexistent-transaction-id" - fake_conn_id = "nonexistent-connection-id" - - # BEFORE: TXN_REGISTRY.lock().unwrap().get_mut(trx_id).unwrap() - # Would panic on None → VM crash - # AFTER: Returns {:error, "Transaction not found"} - result = - EctoLibSql.Native.execute_with_transaction( - fake_trx_id, - fake_conn_id, - "SELECT 1", - [] - ) - - assert {:error, error_msg} = result - assert error_msg =~ "Transaction not found" - end - - test "❌ BEFORE: closing invalid resource crashed VM | ✅ AFTER: returns error tuple" do - fake_cursor_id = "cursor-that-does-not-exist" - - # BEFORE: CURSOR_REGISTRY.lock().unwrap().remove(id).unwrap() - # Would panic → VM crash - # AFTER: Returns {:error, "Cursor not found"} - result = EctoLibSql.Native.close(fake_cursor_id, :cursor_id) - - assert {:error, error_msg} = result - assert error_msg =~ "Cursor not found" - end - - test "✅ Process remains alive after NIF errors (supervision tree works)" do - # Spawn a process that will encounter NIF errors - pid = - spawn(fn -> - # Try multiple invalid operations - _result1 = EctoLibSql.Native.ping("invalid-conn") - _result2 = EctoLibSql.Native.close("invalid-stmt", :stmt_id) - _result3 = EctoLibSql.Native.fetch_cursor("invalid-conn", "invalid-cursor", 100) - - # Sleep to keep process alive - Process.sleep(500) - end) - - # Give it time 
to execute - Process.sleep(100) - - # BEFORE: Process (and possibly VM) would have crashed - # AFTER: Process is still alive - assert Process.alive?(pid) - end - - test "✅ Descriptive error messages help debugging" do - result = EctoLibSql.Native.ping("test-connection-123") - - # Get the error message - assert {:error, error_msg} = result - - # Should be descriptive, not just a panic message - assert String.length(error_msg) > 5 - assert error_msg =~ ~r/(connection|Connection|invalid|Invalid)/i - end - end - - describe "real-world error scenario" do - test "✅ Database operation fails gracefully without crashing" do - # Simulate a real scenario: app tries to use a stale connection ID - # (maybe connection was closed by timeout, network issue, etc.) - - stale_conn_id = "conn-that-was-closed-or-never-existed" - - # Try to execute a query - result = - EctoLibSql.Native.query_args( - stale_conn_id, - :local, - :disable_sync, - "SELECT * FROM users", - [] - ) - - # Should get error, not crash - assert {:error, _error_msg} = result - end - end - - describe "error propagation to supervision tree" do - test "✅ GenServer can handle NIF errors and remain supervised" do - # Demonstrate that errors properly propagate to calling processes - # allowing supervision strategies to work - - parent = self() - - child_pid = - spawn_link(fn -> - # This would crash the VM before refactoring - result = EctoLibSql.Native.ping("invalid-connection") - - # Send result back to parent - send(parent, {:result, result}) - - # Wait for parent signal - receive do - :terminate -> :ok - end - end) - - # Receive the error result - assert_receive {:result, {:error, _}}, 1000 - - # Child process should still be alive - assert Process.alive?(child_pid) - - # Clean up - send(child_pid, :terminate) - end - end -end diff --git a/test/error_handling_test.exs b/test/error_handling_test.exs index 5beda0d..d0f84f5 100644 --- a/test/error_handling_test.exs +++ b/test/error_handling_test.exs @@ -175,7 +175,7 @@ 
defmodule EctoLibSql.ErrorHandlingTest do end describe "concurrent access and mutex safety" do - @tag :skip + @tag :flaky test "concurrent operations don't cause mutex poisoning crashes" do # This test demonstrates that even under concurrent stress, # mutex errors are handled gracefully rather than poisoning @@ -220,9 +220,7 @@ defmodule EctoLibSql.ErrorHandlingTest do # Cleanup EctoLibSql.Native.close(real_conn_id, :conn_id) - File.rm(test_db) - File.rm(test_db <> "-wal") - File.rm(test_db <> "-shm") + EctoLibSql.TestHelpers.cleanup_db_files(test_db) end end diff --git a/test/explain_query_test.exs b/test/explain_query_test.exs index 3eb290d..4a6ced3 100644 --- a/test/explain_query_test.exs +++ b/test/explain_query_test.exs @@ -66,9 +66,7 @@ defmodule EctoLibSql.ExplainQueryTest do setup_all do # Clean up any existing test database files - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) # Start the test repo {:ok, _} = TestRepo.start_link(database: @test_db) @@ -123,9 +121,7 @@ defmodule EctoLibSql.ExplainQueryTest do end # Clean up all database files - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) {:ok, []} diff --git a/test/explain_simple_test.exs b/test/explain_simple_test.exs deleted file mode 100644 index b4d7733..0000000 --- a/test/explain_simple_test.exs +++ /dev/null @@ -1,115 +0,0 @@ -defmodule EctoLibSql.ExplainSimpleTest do - @moduledoc """ - Simpler test for EXPLAIN query support to debug the issue. 
- """ - - use ExUnit.Case, async: false - - import Ecto.Query - - defmodule TestRepo do - use Ecto.Repo, - otp_app: :ecto_libsql, - adapter: Ecto.Adapters.LibSql - end - - defmodule User do - use Ecto.Schema - - schema "explain_test_users" do - field(:name, :string) - field(:email, :string) - end - end - - @test_db "z_ecto_libsql_test-explain-simple.db" - - setup_all do - # Clean up any existing test database files - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") - - {:ok, _} = TestRepo.start_link(database: @test_db) - - Ecto.Adapters.SQL.query!(TestRepo, """ - CREATE TABLE IF NOT EXISTS explain_test_users ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL, - email TEXT NOT NULL - ) - """) - - on_exit(fn -> - try do - Ecto.Adapters.SQL.query!(TestRepo, "DROP TABLE IF EXISTS explain_test_users") - catch - _, _ -> nil - end - - try do - GenServer.stop(TestRepo) - catch - _, _ -> nil - end - - # Clean up all database files - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") - end) - - {:ok, []} - end - - test "direct EXPLAIN query via SQL" do - # Test that executing EXPLAIN directly works - sql = "EXPLAIN QUERY PLAN SELECT * FROM explain_test_users" - {:ok, result} = Ecto.Adapters.SQL.query(TestRepo, sql, []) - - assert is_struct(result, EctoLibSql.Result) - assert is_list(result.rows) - # EXPLAIN QUERY PLAN returns rows with columns: id, parent, notused, detail - assert length(result.columns) == 4 - assert result.columns == ["id", "parent", "notused", "detail"] - assert length(result.rows) > 0 - end - - test "EXPLAIN via explain API returns rows" do - # Build a simple query. - query = from(u in User, select: u.name) - - # The result should be a list of maps. - result = Ecto.Adapters.SQL.explain(TestRepo, :all, query) - - # Check it's a list of results. 
- assert is_list(result) - assert length(result) > 0 - end - - test "EXPLAIN on non-existent table returns error" do - sql = "EXPLAIN QUERY PLAN SELECT * FROM non_existent_table" - - assert {:error, %EctoLibSql.Error{message: message}} = - Ecto.Adapters.SQL.query(TestRepo, sql, []) - - assert message =~ "no such table" or message =~ "non_existent_table" - end - - test "EXPLAIN with invalid SQL syntax returns error" do - sql = "EXPLAIN QUERY PLAN SELECTT * FROM explain_test_users" - - assert {:error, %EctoLibSql.Error{}} = Ecto.Adapters.SQL.query(TestRepo, sql, []) - end - - test "EXPLAIN on empty table returns query plan" do - # EXPLAIN should work even on empty tables - it shows the query plan, not data. - sql = "EXPLAIN QUERY PLAN SELECT * FROM explain_test_users WHERE id = 999999" - {:ok, result} = Ecto.Adapters.SQL.query(TestRepo, sql, []) - - assert is_struct(result, EctoLibSql.Result) - assert is_list(result.rows) - # Should still return a query plan even for a query that would return no rows. 
- assert length(result.rows) > 0 - end -end diff --git a/test/fuzz_test.exs b/test/fuzz_test.exs index 3b0d853..574995a 100644 --- a/test/fuzz_test.exs +++ b/test/fuzz_test.exs @@ -35,10 +35,7 @@ defmodule EctoLibSql.FuzzTest do _ -> :ok end - File.rm(db_path) - File.rm(db_path <> "-shm") - File.rm(db_path <> "-wal") - File.rm(db_path <> "-journal") + EctoLibSql.TestHelpers.cleanup_db_files(db_path) end) {:ok, state: state, db_path: db_path} diff --git a/test/json_helpers_test.exs b/test/json_helpers_test.exs index dde015e..4b123e0 100644 --- a/test/json_helpers_test.exs +++ b/test/json_helpers_test.exs @@ -21,6 +21,10 @@ defmodule EctoLibSql.JSONHelpersTest do state ) + on_exit(fn -> + EctoLibSql.disconnect([], state) + end) + {:ok, state: state} end @@ -290,8 +294,9 @@ defmodule EctoLibSql.JSONHelpersTest do {:ok, result} = JSON.convert(state, json, :jsonb) # Should be binary assert is_binary(result) - # JSONB is smaller/different than text JSON - assert byte_size(result) < byte_size(json) + # JSONB is a binary format (different from text JSON) + # Note: JSONB may be smaller, but size is not a stable guarantee across versions + assert result != json end test "default format is JSON", %{state: state} do @@ -730,4 +735,327 @@ defmodule EctoLibSql.JSONHelpersTest do assert val == 999 end end + + describe "JSONB binary format operations" do + test "JSONB round-trip correctness: text → JSONB → text", %{state: state} do + original_json = ~s({"name":"Alice","age":30,"active":true,"tags":["a","b"]}) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, original_json, :jsonb) + assert is_binary(jsonb) + assert byte_size(jsonb) > 0 + + # Convert back to text JSON + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT json(?)", + [jsonb], + [], + state + ) + + [[canonical_json]] = result.rows + + # Parse both to ensure semantic equivalence + {:ok, original_decoded} = Jason.decode(original_json) + {:ok, canonical_decoded} = Jason.decode(canonical_json) + + 
assert original_decoded == canonical_decoded + end + + test "JSONB and text JSON produce identical extraction results", %{state: state} do + json_text = ~s({"user":{"name":"Bob","email":"bob@example.com"},"count":42}) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, json_text, :jsonb) + + # Extract from text JSON + {:ok, name_text} = JSON.extract(state, json_text, "$.user.name") + {:ok, count_text} = JSON.extract(state, json_text, "$.count") + + # Extract from JSONB (stored as binary) + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT json_extract(?, '$.user.name'), json_extract(?, '$.count')", + [jsonb, jsonb], + [], + state + ) + + [[name_jsonb, count_jsonb]] = result.rows + + assert name_text == name_jsonb + assert count_text == count_jsonb + end + + test "JSONB storage is 5-10% smaller than text JSON", %{state: state} do + # Create a reasonably sized JSON object + json_text = + ~s({"user":{"id":1,"name":"Alice","email":"alice@example.com","profile":{"bio":"Software engineer","location":"San Francisco","interests":["Elixir","Rust","Go"]},"settings":{"theme":"dark","notifications":true,"language":"en"}}}) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, json_text, :jsonb) + + text_size = byte_size(json_text) + jsonb_size = byte_size(jsonb) + + # JSONB should be smaller (5-10% is typical, but may vary) + # We check for general size improvement (not overly strict) + assert jsonb_size <= text_size, + "JSONB (#{jsonb_size} bytes) should be <= text JSON (#{text_size} bytes)" + + # Most of the time JSONB is noticeably smaller + # but we don't enforce a strict percentage due to variation + end + + test "JSONB modification preserves format (json_set)", %{state: state} do + json_text = ~s({"name":"Alice","age":30}) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, json_text, :jsonb) + + # Modify JSONB using json_set + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT json_set(?, '$.age', 31)", + [jsonb], + 
[], + state + ) + + [[modified_json]] = result.rows + + # Extract from modified JSON + {:ok, age} = JSON.extract(state, modified_json, "$.age") + assert age == 31 + end + + test "JSONB array operations", %{state: state} do + array_json = ~s([1,2,3,4,5]) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, array_json, :jsonb) + + # Extract array element + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT json_extract(?, '$[2]')", + [jsonb], + [], + state + ) + + [[element]] = result.rows + assert element == 3 + end + + test "JSONB with large objects (multi-KB)", %{state: state} do + # Create a large JSON object with multiple nested structures + large_json = + Jason.encode!(%{ + "data" => + Enum.map(1..100, fn i -> + %{ + "id" => i, + "name" => "Item #{i}", + "description" => + "This is a longer description for item number #{i} with some additional details.", + "metadata" => %{ + "created_at" => + "2024-01-#{String.pad_leading(to_string(rem(i, 28) + 1), 2, "0")}", + "tags" => ["tag1", "tag2", "tag3"] + } + } + end) + }) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, large_json, :jsonb) + assert is_binary(jsonb) + assert byte_size(jsonb) > 1000, "Should handle large objects (>1KB)" + + # Extract from large JSONB + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT json_extract(?, '$.data[0].name')", + [jsonb], + [], + state + ) + + [[name]] = result.rows + assert name == "Item 1" + end + + test "JSONB object key iteration", %{state: state} do + json_obj = ~s({"a":1,"b":2,"c":3,"d":4}) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, json_obj, :jsonb) + + # Get keys (order may vary) + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT json_extract(?, '$')", + [jsonb], + [], + state + ) + + [[result_obj]] = result.rows + + # Parse and verify all keys are present + {:ok, decoded} = Jason.decode(result_obj) + keys = Map.keys(decoded) + assert Enum.sort(keys) == ["a", "b", "c", "d"] + end + + test 
"JSONB and text JSON with nulls", %{state: state} do + json_with_nulls = ~s({"a":null,"b":1,"c":null}) + + # Convert to JSONB + {:ok, jsonb} = JSON.convert(state, json_with_nulls, :jsonb) + + # Extract nulls + {:ok, _, result, _state} = + EctoLibSql.handle_execute( + "SELECT json_extract(?, '$.a'), json_extract(?, '$.b'), json_extract(?, '$.c')", + [jsonb, jsonb, jsonb], + [], + state + ) + + [[a, b, c]] = result.rows + assert a == nil + assert b == 1 + assert c == nil + end + + test "JSONB storage and retrieval consistency", %{state: state} do + # Insert both text and JSONB versions of same data + json_text = ~s({"x":10,"y":20,"z":30}) + + {:ok, jsonb} = JSON.convert(state, json_text, :jsonb) + + # Clear table and insert both versions + EctoLibSql.handle_execute("DELETE FROM json_test", [], [], state) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO json_test (id, data) VALUES (1, ?)", + [json_text], + [], + state + ) + + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO json_test (id, data_jsonb) VALUES (2, ?)", + [jsonb], + [], + state + ) + + # Retrieve text version + {:ok, _, text_result, state} = + EctoLibSql.handle_execute( + "SELECT json_extract(data, '$.x'), json_extract(data, '$.y') FROM json_test WHERE id = 1", + [], + [], + state + ) + + [[text_x, text_y]] = text_result.rows + + # Retrieve JSONB version + {:ok, _, jsonb_result, _state} = + EctoLibSql.handle_execute( + "SELECT json_extract(data_jsonb, '$.x'), json_extract(data_jsonb, '$.y') FROM json_test WHERE id = 2", + [], + [], + state + ) + + [[jsonb_x, jsonb_y]] = jsonb_result.rows + + # Both should return same values + assert text_x == jsonb_x + assert text_y == jsonb_y + assert text_x == 10 + assert text_y == 20 + end + + test "JSONB modification with json_replace", %{state: state} do + json_text = ~s({"status":"pending","priority":1}) + + {:ok, jsonb} = JSON.convert(state, json_text, :jsonb) + + # Replace value + {:ok, _, result, _state} = + 
EctoLibSql.handle_execute( + "SELECT json_replace(?, '$.status', 'completed'), json_replace(?, '$.priority', 5)", + [jsonb, jsonb], + [], + state + ) + + [[status_json, priority_json]] = result.rows + + {:ok, status} = JSON.extract(state, status_json, "$.status") + {:ok, priority} = JSON.extract(state, priority_json, "$.priority") + + assert status == "completed" + assert priority == 5 + end + + test "mixed operations: JSONB extract, modify, insert", %{state: state} do + json_text = ~s({"config":{"timeout":30,"retries":3}}) + + {:ok, jsonb} = JSON.convert(state, json_text, :jsonb) + + # Extract original value + {:ok, _, orig_result, state} = + EctoLibSql.handle_execute( + "SELECT json_extract(?, '$.config.timeout')", + [jsonb], + [], + state + ) + + [[original_timeout]] = orig_result.rows + assert original_timeout == 30 + + # Modify + {:ok, _, modified_result, state} = + EctoLibSql.handle_execute( + "SELECT json_set(?, '$.config.timeout', 60)", + [jsonb], + [], + state + ) + + [[modified_jsonb]] = modified_result.rows + + # Insert modified version + {:ok, _, _, state} = + EctoLibSql.handle_execute( + "INSERT INTO json_test (id, data_jsonb) VALUES (99, ?)", + [modified_jsonb], + [], + state + ) + + # Retrieve and verify + {:ok, _, retrieve_result, _state} = + EctoLibSql.handle_execute( + "SELECT json_extract(data_jsonb, '$.config.timeout') FROM json_test WHERE id = 99", + [], + [], + state + ) + + [[retrieved_timeout]] = retrieve_result.rows + assert retrieved_timeout == 60 + end + end end diff --git a/test/named_parameters_execution_test.exs b/test/named_parameters_execution_test.exs index f5cf35b..5320631 100644 --- a/test/named_parameters_execution_test.exs +++ b/test/named_parameters_execution_test.exs @@ -34,10 +34,7 @@ defmodule EctoLibSql.NamedParametersExecutionTest do ) on_exit(fn -> - File.rm(db_name) - File.rm(db_name <> "-wal") - File.rm(db_name <> "-shm") - File.rm(db_name <> "-journal") + EctoLibSql.TestHelpers.cleanup_db_files(db_name) end) {:ok, 
state: state, db_name: db_name} diff --git a/test/pool_load_test.exs b/test/pool_load_test.exs new file mode 100644 index 0000000..329ab2e --- /dev/null +++ b/test/pool_load_test.exs @@ -0,0 +1,1451 @@ +defmodule EctoLibSql.PoolLoadTest do + @moduledoc """ + Tests for concurrent connection behaviour under load. + + Critical scenarios: + 1. Multiple concurrent independent connections + 2. Long-running queries don't cause timeout issues + 3. Connection recovery after errors + 4. Resource cleanup under concurrent load + 5. Transaction isolation under concurrent load + + Note: Tests create separate connections (not pooled) to simulate + concurrent access patterns and verify robustness. + """ + use ExUnit.Case, async: false + require Logger + + setup do + test_db = "z_ecto_libsql_test-pool_#{:erlang.unique_integer([:positive])}.db" + + # Create test table and close connection immediately + {:ok, state} = EctoLibSql.connect(database: test_db) + + {:ok, _query, _result, state} = + EctoLibSql.handle_execute( + "CREATE TABLE test_data (id INTEGER PRIMARY KEY AUTOINCREMENT, value TEXT, duration INTEGER)", + [], + [], + state + ) + + # Close setup connection - tests create their own connections + EctoLibSql.disconnect([], state) + + on_exit(fn -> + EctoLibSql.TestHelpers.cleanup_db_files(test_db) + end) + + {:ok, test_db: test_db} + end + + # ============================================================================ + # HELPER FUNCTIONS FOR EDGE CASE DATA + # ============================================================================ + + defp generate_edge_case_values(task_num) do + [ + # Normal string + "normal_value_#{task_num}", + # NULL value + nil, + # Empty string + "", + # Large string (1KB) + String.duplicate("x", 1000), + # Special characters + "special_chars_!@#$%^&*()_+-=[]{};" + ] + end + + defp generate_unicode_edge_case_values(task_num) do + [ + # Latin with accents (ê, á, ü) + "café_#{task_num}", + # Chinese characters (中文) + "chinese_中文_#{task_num}", + # 
Arabic characters (العربية) + "arabic_العربية_#{task_num}", + # Emoji (😀, 🎉, ❤️) + "emoji_😀🎉❤️_#{task_num}", + # Mixed: combining all above + "mixed_café_中文_العربية_😀_#{task_num}" + ] + end + + # Single helper for inserting any edge case value (normal, unicode, NULL, empty, etc.) + defp insert_edge_case_value(state, value) do + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + [value], + [], + state + ) + end + + describe "concurrent independent connections" do + @tag :slow + @tag :flaky + test "multiple concurrent connections execute successfully", %{test_db: test_db} do + # Spawn 5 concurrent connections + tasks = + Enum.map(1..5, fn i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["task_#{i}"], + [], + state + ) + after + EctoLibSql.disconnect([], state) + end + end) + end) + + # Wait for all to complete with extended timeout + results = Task.await_many(tasks, 30_000) + + # All should succeed + Enum.each(results, fn result -> + assert {:ok, _query, _result, _state} = result + end) + + # Verify all inserts succeeded + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[5]] = result.rows + end + + @tag :slow + @tag :flaky + test "rapid burst of concurrent connections succeeds", %{test_db: test_db} do + # Fire 10 connections rapidly + tasks = + Enum.map(1..10, fn i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["burst_#{i}"], + [], + state + ) + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All should succeed + 
success_count = Enum.count(results, fn r -> match?({:ok, _, _, _}, r) end) + assert success_count == 10 + end + + @tag :slow + @tag :flaky + test "concurrent connections with edge-case data (NULL, empty, large values)", %{ + test_db: test_db + } do + # Spawn 5 concurrent connections, each inserting multiple edge-case values + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + # Insert each edge-case value for this task + edge_values = generate_edge_case_values(task_num) + + results = + Enum.map(edge_values, fn value -> + insert_edge_case_value(state, value) + end) + + # All inserts should succeed + all_ok = Enum.all?(results, fn r -> match?({:ok, _, _, _}, r) end) + if all_ok, do: {:ok, :all_edge_cases_inserted}, else: {:error, :some_inserts_failed} + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All edge-case inserts should succeed + Enum.each(results, fn result -> + assert {:ok, :all_edge_cases_inserted} = result + end) + + # Verify all inserts: 5 tasks × 5 edge cases = 25 rows + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[25]] = result.rows + + # Verify we can read back the NULL values and empty strings + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, null_result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value IS NULL", + [], + [], + state + ) + + {:ok, _query, empty_result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value = ''", + [], + [], + state + ) + + EctoLibSql.disconnect([], state) + + # Should have 5 NULL values (one per task) + assert [[5]] = null_result.rows + # Should have 5 empty 
strings (one per task) + assert [[5]] = empty_result.rows + end + + @tag :slow + @tag :flaky + test "concurrent connections with unicode data (Chinese, Arabic, emoji)", %{ + test_db: test_db + } do + # Clean the table first (other tests may have added data) + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _, _, state} = + EctoLibSql.handle_execute("DELETE FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + # Spawn 5 concurrent connections, each inserting Unicode values + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + # Insert each Unicode value for this task + unicode_values = generate_unicode_edge_case_values(task_num) + + results = + Enum.map(unicode_values, fn value -> + insert_edge_case_value(state, value) + end) + + # All inserts should succeed + all_ok = Enum.all?(results, fn r -> match?({:ok, _, _, _}, r) end) + + if all_ok, + do: {:ok, :all_unicode_inserted}, + else: {:error, :some_unicode_inserts_failed} + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All Unicode inserts should succeed + Enum.each(results, fn result -> + assert {:ok, :all_unicode_inserted} = result + end) + + # Verify all inserts: 5 tasks × 5 Unicode values = 25 rows + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[25]] = result.rows + + # Verify Unicode characters are correctly preserved by reading back specific values + {:ok, state2} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, all_rows_result, _state} = + EctoLibSql.handle_execute("SELECT value FROM test_data", [], [], state2) + + EctoLibSql.disconnect([], state2) + + values = 
Enum.map(all_rows_result.rows, fn [v] -> v end) + + # Verify specific Unicode patterns are preserved + # Note: café, 中文, and العربية appear in both individual and "mixed_..." patterns = 10 each + # The emoji pattern 😀🎉❤️ only appears in "emoji_..." (mixed_ has just 😀) = 5 + # mixed_ only appears in the mixed pattern = 5 + assert Enum.count(values, &String.contains?(&1, "café")) == 10 + assert Enum.count(values, &String.contains?(&1, "中文")) == 10 + assert Enum.count(values, &String.contains?(&1, "العربية")) == 10 + assert Enum.count(values, &String.contains?(&1, "😀🎉❤️")) == 5 + assert Enum.count(values, &String.contains?(&1, "mixed_")) == 5 + end + end + + describe "long-running operations" do + @tag :slow + @tag :flaky + test "long transaction doesn't cause timeout issues", %{test_db: test_db} do + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 5000) + + try do + # Start longer transaction + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value, duration) VALUES (?, ?)", + ["long", 100], + [], + trx_state + ) + + # Simulate some work + Process.sleep(100) + + {:ok, _committed_state} = EctoLibSql.Native.commit(trx_state) + after + EctoLibSql.disconnect([], state) + end + end + + @tag :slow + @tag :flaky + test "multiple concurrent transactions complete despite duration", %{test_db: test_db} do + tasks = + Enum.map(1..3, fn i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["trx_#{i}"], + [], + trx_state + ) + + # Hold transaction + Process.sleep(50) + + # Explicitly handle commit result to catch errors + case EctoLibSql.Native.commit(trx_state) do + {:ok, _committed_state} -> + {:ok, :committed} + + {:error, reason} -> + 
{:error, {:commit_failed, reason}} + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All commits should succeed; fail test if any error occurred + Enum.each(results, fn result -> + case result do + {:ok, :committed} -> + :ok + + {:error, {:commit_failed, reason}} -> + flunk("Transaction commit failed: #{inspect(reason)}") + + other -> + flunk("Unexpected result from concurrent transaction: #{inspect(other)}") + end + end) + + # Verify all inserts + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[3]] = result.rows + end + end + + describe "connection recovery" do + # Note: This test is sequential (not concurrent) and runs by default. + # It complements connection_recovery_test.exs by using file-based database. + test "connection recovers after query error", %{test_db: test_db} do + # Clear table first to ensure exact row count assertions + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _, _, state} = + EctoLibSql.handle_execute("DELETE FROM test_data", [], [], state) + + try do + # Successful insert + {:ok, _query, _result, state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["before"], + [], + state + ) + + # Force error (syntax) + error_result = EctoLibSql.handle_execute("INVALID SQL", [], [], state) + assert {:error, _reason, ^state} = error_result + + # Connection should still work + {:ok, _query, _result, ^state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["after"], + [], + state + ) + after + EctoLibSql.disconnect([], state) + end + + # Verify both successful inserts + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, _query, result, _state} = + 
EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + assert [[2]] = result.rows + after + EctoLibSql.disconnect([], state) + end + end + + # Note: This test is sequential (not concurrent) and runs by default. + test "connection recovery with edge-case data (NULL, empty, large values)", %{ + test_db: test_db + } do + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + # Insert edge-case data before error + edge_values = generate_edge_case_values(1) + + Enum.each(edge_values, fn value -> + insert_edge_case_value(state, value) + end) + + # Cause error + error_result = EctoLibSql.handle_execute("MALFORMED SQL HERE", [], [], state) + assert {:error, _reason, ^state} = error_result + + # Insert more edge-case data after error to verify recovery + edge_values_2 = generate_edge_case_values(2) + + insert_results = + Enum.map(edge_values_2, fn value -> + insert_edge_case_value(state, value) + end) + + # All inserts should succeed + all_ok = Enum.all?(insert_results, fn r -> match?({:ok, _, _, _}, r) end) + assert all_ok + after + EctoLibSql.disconnect([], state) + end + + # Verify all edge-case data persisted + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + # Should have 10 rows (5 before error + 5 after) + assert [[10]] = result.rows + + # Verify NULL values + {:ok, _query, null_result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value IS NULL", + [], + [], + state + ) + + # Should have 2 NULL values + assert [[2]] = null_result.rows + after + EctoLibSql.disconnect([], state) + end + end + + @tag :slow + @tag :flaky + test "multiple connections recover independently from errors", %{test_db: test_db} do + tasks = + Enum.map(1..3, fn i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 
30_000) + + try do + # Insert before error + {:ok, _query, _result, state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["before_#{i}"], + [], + state + ) + + # Cause error (intentionally ignore it to test recovery) + # Discard error state - next operation uses original state + error_result = EctoLibSql.handle_execute("BAD SQL", [], [], state) + assert {:error, _reason, _state} = error_result + + # Recovery insert - verify it succeeds + case EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["after_#{i}"], + [], + state + ) do + {:ok, _query, _result, _state} -> + {:ok, :recovered} + + {:error, reason, _state} -> + {:error, {:recovery_failed, reason}} + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All recovery queries should succeed + Enum.each(results, fn result -> + case result do + {:ok, :recovered} -> + :ok + + {:error, {:recovery_failed, reason}} -> + flunk("Connection recovery insert failed: #{inspect(reason)}") + + other -> + flunk("Unexpected result from connection recovery task: #{inspect(other)}") + end + end) + + # Verify all inserts + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + # Should have 6 rows (3 before + 3 after) + assert [[6]] = result.rows + end + end + + describe "resource cleanup under load" do + @tag :slow + @tag :flaky + test "prepared statements cleaned up under concurrent load", %{test_db: test_db} do + tasks = + Enum.map(1..5, fn i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, stmt} = + EctoLibSql.Native.prepare( + state, + "INSERT INTO test_data (value) VALUES (?)" + ) + + try do + {:ok, _} = + EctoLibSql.Native.execute_stmt( + state, + stmt, + "INSERT 
INTO test_data (value) VALUES (?)", + ["prep_#{i}"] + ) + + {:ok, :prepared_and_cleaned} + after + # Always close the prepared statement, handle errors gracefully. + case EctoLibSql.Native.close_stmt(stmt) do + :ok -> + :ok + + {:error, reason} -> + Logger.debug( + "Error closing prepared statement #{inspect(stmt)}: #{inspect(reason)}" + ) + + :ok + end + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # Verify all prepared statement operations succeeded + Enum.each(results, fn result -> + case result do + {:ok, :prepared_and_cleaned} -> + :ok + + {:error, reason} -> + flunk("Prepared statement operation failed: #{inspect(reason)}") + + other -> + flunk("Unexpected result from prepared statement task: #{inspect(other)}") + end + end) + + # Verify all inserts succeeded + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[5]] = result.rows + end + + @tag :slow + @tag :flaky + test "prepared statements with edge-case data cleaned up correctly", %{ + test_db: test_db + } do + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, stmt} = + EctoLibSql.Native.prepare( + state, + "INSERT INTO test_data (value) VALUES (?)" + ) + + try do + # Execute prepared statement with edge-case data + edge_values = generate_edge_case_values(task_num) + + execute_results = + Enum.map(edge_values, fn value -> + EctoLibSql.Native.execute_stmt( + state, + stmt, + "INSERT INTO test_data (value) VALUES (?)", + [value] + ) + end) + + # All executions should succeed + all_ok = Enum.all?(execute_results, fn r -> match?({:ok, _}, r) end) + + if all_ok do + {:ok, :prepared_with_edge_cases} + else + {:error, :some_edge_case_inserts_failed} + end + 
after + # Always close the prepared statement, handle errors gracefully. + case EctoLibSql.Native.close_stmt(stmt) do + :ok -> + :ok + + {:error, reason} -> + Logger.debug( + "Error closing prepared statement #{inspect(stmt)}: #{inspect(reason)}" + ) + + :ok + end + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # Verify all prepared statement operations succeeded + Enum.each(results, fn result -> + case result do + {:ok, :prepared_with_edge_cases} -> + :ok + + {:error, reason} -> + flunk("Prepared statement with edge-case data failed: #{inspect(reason)}") + + other -> + flunk("Unexpected result from prepared statement edge-case task: #{inspect(other)}") + end + end) + + # Verify all inserts succeeded: 5 tasks × 5 edge cases = 25 rows + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + assert [[25]] = result.rows + + # Verify NULL values exist + {:ok, _query, null_result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value IS NULL", + [], + [], + state + ) + + # Should have 5 NULL values (one per task) + assert [[5]] = null_result.rows + after + EctoLibSql.disconnect([], state) + end + end + end + + describe "transaction isolation" do + @tag :slow + @tag :flaky + test "concurrent transactions don't interfere with each other", %{test_db: test_db} do + tasks = + Enum.map(1..4, fn i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["iso_#{i}"], + [], + trx_state + ) + + # Slight delay to increase overlap + Process.sleep(10) + + # Explicitly handle commit result to catch errors + case 
EctoLibSql.Native.commit(trx_state) do + {:ok, _committed_state} -> + {:ok, :committed} + + {:error, reason} -> + {:error, {:commit_failed, reason}} + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All commits should succeed; fail test if any error occurred + Enum.each(results, fn result -> + case result do + {:ok, :committed} -> + :ok + + {:error, {:commit_failed, reason}} -> + flunk("Concurrent transaction commit failed: #{inspect(reason)}") + + other -> + flunk("Unexpected result from concurrent transaction: #{inspect(other)}") + end + end) + + # All inserts should be visible + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[4]] = result.rows + end + + @tag :slow + @tag :flaky + test "concurrent transactions with edge-case data maintain isolation", %{test_db: test_db} do + # Each task inserts edge-case values in a transaction + tasks = + Enum.map(1..4, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + # Insert edge-case values within transaction, threading state through + edge_values = generate_edge_case_values(task_num) + + # Reduce with explicit error handling to surface failures clearly + with {:ok, final_trx_state} <- + Enum.reduce_while(edge_values, {:ok, trx_state}, fn value, acc -> + case acc do + {:ok, acc_state} -> + case insert_edge_case_value(acc_state, value) do + {:ok, _query, _result, new_state} -> + {:cont, {:ok, new_state}} + + {:error, reason, _state} -> + {:halt, {:error, {:insert_failed, reason}}} + end + + error -> + {:halt, error} + end + end) do + # Slight delay to increase overlap with other transactions + Process.sleep(10) + + # Commit the transaction 
containing all edge-case values + case EctoLibSql.Native.commit(final_trx_state) do + {:ok, _committed_state} -> + {:ok, :committed_with_edge_cases} + + {:error, reason} -> + {:error, {:commit_failed, reason}} + end + else + {:error, reason} -> + {:error, reason} + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All commits should succeed + Enum.each(results, fn result -> + case result do + {:ok, :committed_with_edge_cases} -> + :ok + + {:error, {:commit_failed, reason}} -> + flunk("Edge-case transaction commit failed: #{inspect(reason)}") + + other -> + flunk("Unexpected result from edge-case transaction: #{inspect(other)}") + end + end) + + # Verify all edge-case data was inserted: 4 tasks × 5 edge cases = 20 rows + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[20]] = result.rows + + # Verify NULL values survived transaction boundaries + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, null_result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value IS NULL", + [], + [], + state + ) + + EctoLibSql.disconnect([], state) + + # Should have 4 NULL values (one per task) + assert [[4]] = null_result.rows + end + end + + describe "concurrent load edge cases" do + @tag :slow + @tag :flaky + test "concurrent load with only NULL values", %{test_db: test_db} do + # Clear table first to ensure exact row count assertions + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _, _, state} = + EctoLibSql.handle_execute("DELETE FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + tasks = + Enum.map(1..10, fn _i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, 
busy_timeout: 30_000) + + try do + EctoLibSql.handle_execute( + "INSERT INTO test_data (value, duration) VALUES (?, ?)", + [nil, nil], + [], + state + ) + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All should succeed + Enum.each(results, fn result -> + assert {:ok, _query, _result, _state} = result + end) + + # Verify all NULL inserts + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value IS NULL AND duration IS NULL", + [], + [], + state + ) + + EctoLibSql.disconnect([], state) + assert [[10]] = result.rows + end + + @tag :slow + @tag :flaky + test "concurrent load with only empty strings", %{test_db: test_db} do + # Clear table first to ensure exact row count assertions + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _, _, state} = + EctoLibSql.handle_execute("DELETE FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + tasks = + Enum.map(1..10, fn _i -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + [""], + [], + state + ) + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + Enum.each(results, fn result -> + assert {:ok, _query, _result, _state} = result + end) + + # Verify empty strings (not NULL) + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, empty_result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value = ''", + [], + [], + state + ) + + {:ok, _query, null_result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM test_data WHERE value IS NULL", + [], + [], + state + ) + + EctoLibSql.disconnect([], state) + + assert 
[[10]] = empty_result.rows + assert [[0]] = null_result.rows + end + + @tag :slow + @tag :flaky + test "concurrent load large dataset (100 rows per connection)", %{test_db: test_db} do + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 60_000) + + try do + # Insert 100 rows per task + results = + Enum.map(1..100, fn row_num -> + EctoLibSql.handle_execute( + "INSERT INTO test_data (value, duration) VALUES (?, ?)", + ["task_#{task_num}_row_#{row_num}", task_num * 100 + row_num], + [], + state + ) + end) + + all_ok = Enum.all?(results, fn r -> match?({:ok, _, _, _}, r) end) + if all_ok, do: {:ok, 100}, else: {:error, :some_failed} + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 60_000) + + # All tasks should succeed + Enum.each(results, fn result -> + assert {:ok, 100} = result + end) + + # Verify total row count: 5 tasks × 100 rows = 500 + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + assert [[500]] = result.rows + end + + @tag :slow + @tag :flaky + test "concurrent load with type conversion (ints, floats, strings)", %{test_db: test_db} do + # Add columns for different types + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, _result, _state} = + EctoLibSql.handle_execute( + "CREATE TABLE typed_data (id INTEGER PRIMARY KEY AUTOINCREMENT, int_val INTEGER, float_val REAL, text_val TEXT, timestamp_val TEXT)", + [], + [], + state + ) + + EctoLibSql.disconnect([], state) + + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + now = DateTime.utc_now() |> DateTime.to_iso8601() + + results = [ + # Integer values + 
EctoLibSql.handle_execute( + "INSERT INTO typed_data (int_val, float_val, text_val, timestamp_val) VALUES (?, ?, ?, ?)", + [task_num * 1000, task_num * 1.5, "text_#{task_num}", now], + [], + state + ), + # Negative integer + EctoLibSql.handle_execute( + "INSERT INTO typed_data (int_val, float_val, text_val, timestamp_val) VALUES (?, ?, ?, ?)", + [-task_num, -task_num * 0.5, "negative_#{task_num}", now], + [], + state + ), + # Zero values + EctoLibSql.handle_execute( + "INSERT INTO typed_data (int_val, float_val, text_val, timestamp_val) VALUES (?, ?, ?, ?)", + [0, 0.0, "", now], + [], + state + ), + # Large integer (using 1.0e307 instead of near-max Float64 to avoid platform-specific rounding) + EctoLibSql.handle_execute( + "INSERT INTO typed_data (int_val, float_val, text_val, timestamp_val) VALUES (?, ?, ?, ?)", + [9_223_372_036_854_775_807, 1.0e307, "max_#{task_num}", now], + [], + state + ) + ] + + all_ok = Enum.all?(results, fn r -> match?({:ok, _, _, _}, r) end) + if all_ok, do: {:ok, :types_inserted}, else: {:error, :type_insert_failed} + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + Enum.each(results, fn result -> + assert {:ok, :types_inserted} = result + end) + + # Verify type preservation + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute( + "SELECT int_val, float_val, text_val FROM typed_data WHERE int_val = 0 LIMIT 1", + [], + [], + state + ) + + EctoLibSql.disconnect([], state) + + [[int_val, float_val, text_val]] = result.rows + assert int_val == 0 + assert float_val == 0.0 + assert text_val == "" + end + end + + describe "transaction rollback under load" do + @tag :slow + @tag :flaky + test "concurrent transaction rollback leaves no data", %{test_db: test_db} do + # Clear any existing data + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + {:ok, _, _, state} = 
EctoLibSql.handle_execute("DELETE FROM test_data", [], [], state) + EctoLibSql.disconnect([], state) + + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + # Begin transaction + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + # Insert some data + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["rollback_test_#{task_num}"], + [], + trx_state + ) + + # Always rollback - data should not persist + case EctoLibSql.Native.rollback(trx_state) do + {:ok, _state} -> + {:ok, :rolled_back} + + {:error, reason} -> + {:error, {:rollback_failed, reason}} + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All rollbacks should succeed + Enum.each(results, fn result -> + assert {:ok, :rolled_back} = result + end) + + # Verify no data persisted + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[0]] = result.rows + end + + @tag :slow + @tag :flaky + test "mixed commit and rollback transactions maintain consistency", %{test_db: test_db} do + # Clear any existing data + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + {:ok, _, _, state} = EctoLibSql.handle_execute("DELETE FROM test_data", [], [], state) + EctoLibSql.disconnect([], state) + + # Even tasks commit, odd tasks rollback + tasks = + Enum.map(1..10, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO test_data (value) VALUES (?)", + ["task_#{task_num}"], + [], + trx_state + ) + 
+ Process.sleep(5) + + if rem(task_num, 2) == 0 do + # Even tasks commit + case EctoLibSql.Native.commit(trx_state) do + {:ok, _state} -> {:ok, :committed} + {:error, reason} -> {:error, {:commit_failed, reason}} + end + else + # Odd tasks rollback + case EctoLibSql.Native.rollback(trx_state) do + {:ok, _state} -> {:ok, :rolled_back} + {:error, reason} -> {:error, {:rollback_failed, reason}} + end + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # Count commits and rollbacks + commits = Enum.count(results, fn r -> r == {:ok, :committed} end) + rollbacks = Enum.count(results, fn r -> r == {:ok, :rolled_back} end) + + assert commits == 5, "Should have 5 committed transactions" + assert rollbacks == 5, "Should have 5 rolled back transactions" + + # Verify only committed data exists (5 rows) + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[5]] = result.rows + end + + @tag :slow + @tag :flaky + test "transaction rollback after intentional constraint violation", %{test_db: test_db} do + # Create table with unique constraint + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, _result, _state} = + EctoLibSql.handle_execute( + "CREATE TABLE unique_test (id INTEGER PRIMARY KEY, unique_val TEXT UNIQUE)", + [], + [], + state + ) + + # Insert initial row + {:ok, _query, _result, _state} = + EctoLibSql.handle_execute( + "INSERT INTO unique_test (unique_val) VALUES (?)", + ["existing_value"], + [], + state + ) + + EctoLibSql.disconnect([], state) + + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + try do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + # Insert valid row + {:ok, 
_query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO unique_test (unique_val) VALUES (?)", + ["task_#{task_num}_valid"], + [], + trx_state + ) + + # Try to insert duplicate - should fail + result = + EctoLibSql.handle_execute( + "INSERT INTO unique_test (unique_val) VALUES (?)", + ["existing_value"], + [], + trx_state + ) + + case result do + {:error, _reason, trx_state} -> + # Expected: constraint violation - assert rollback succeeds + case EctoLibSql.Native.rollback(trx_state) do + {:ok, _} -> {:ok, :correctly_rolled_back} + {:error, reason} -> {:error, {:rollback_failed, reason}} + end + + {:ok, _query, _result, trx_state} -> + # Unexpected: should have failed - still need to clean up + case EctoLibSql.Native.rollback(trx_state) do + {:ok, _} -> + {:error, :should_have_failed} + + {:error, reason} -> + {:error, {:unexpected_success_and_rollback_failed, reason}} + end + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All should have rolled back due to constraint violation + Enum.each(results, fn result -> + assert {:ok, :correctly_rolled_back} = result + end) + + # Verify only original row exists + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM unique_test", [], [], state) + + EctoLibSql.disconnect([], state) + + # Only the initial "existing_value" row should exist + assert [[1]] = result.rows + end + + @tag :slow + @tag :flaky + test "concurrent transactions with edge-case data and rollback", %{test_db: test_db} do + # Clear table + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + {:ok, _, _, state} = EctoLibSql.handle_execute("DELETE FROM test_data", [], [], state) + EctoLibSql.disconnect([], state) + + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 
30_000) + + try do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + # Insert edge-case values in transaction, threading state through + edge_values = generate_edge_case_values(task_num) + + # Use reduce_while to defensively handle insert failures + with {:ok, final_trx_state} <- + Enum.reduce_while(edge_values, {:ok, trx_state}, fn value, acc -> + case acc do + {:ok, acc_state} -> + case insert_edge_case_value(acc_state, value) do + {:ok, _query, _result, new_state} -> + {:cont, {:ok, new_state}} + + {:error, reason, _state} -> + {:halt, {:error, {:insert_failed, value, reason}}} + end + + error -> + {:halt, error} + end + end) do + # Always rollback - edge-case data should not persist + case EctoLibSql.Native.rollback(final_trx_state) do + {:ok, _state} -> + {:ok, :edge_cases_rolled_back} + + {:error, reason} -> + {:error, {:rollback_failed, reason}} + end + else + {:error, reason} -> + {:error, {:edge_case_insertion_failed, reason}} + end + after + EctoLibSql.disconnect([], state) + end + end) + end) + + results = Task.await_many(tasks, 30_000) + + # All rollbacks should succeed + Enum.each(results, fn result -> + assert {:ok, :edge_cases_rolled_back} = result + end) + + # Verify no data persisted + {:ok, state} = EctoLibSql.connect(database: test_db, busy_timeout: 30_000) + + {:ok, _query, result, _state} = + EctoLibSql.handle_execute("SELECT COUNT(*) FROM test_data", [], [], state) + + EctoLibSql.disconnect([], state) + + assert [[0]] = result.rows + end + end +end diff --git a/test/pragma_test.exs b/test/pragma_test.exs index d47e023..1429e25 100644 --- a/test/pragma_test.exs +++ b/test/pragma_test.exs @@ -10,9 +10,7 @@ defmodule EctoLibSql.PragmaTest do on_exit(fn -> EctoLibSql.disconnect([], state) - File.rm(test_db) - File.rm(test_db <> "-shm") - File.rm(test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(test_db) end) {:ok, state: state} @@ -274,9 +272,7 @@ defmodule EctoLibSql.PragmaTest do # Clean up EctoLibSql.disconnect([], state2) - 
File.rm(test_db2) - File.rm(test_db2 <> "-wal") - File.rm(test_db2 <> "-shm") + EctoLibSql.TestHelpers.cleanup_db_files(test_db2) end end end diff --git a/test/prepared_statement_test.exs b/test/prepared_statement_test.exs index 07b1153..dedff38 100644 --- a/test/prepared_statement_test.exs +++ b/test/prepared_statement_test.exs @@ -29,9 +29,7 @@ defmodule EctoLibSql.PreparedStatementTest do on_exit(fn -> Native.close(state.conn_id, :conn_id) - File.rm(db_file) - File.rm(db_file <> "-shm") - File.rm(db_file <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(db_file) end) {:ok, state: state} @@ -310,6 +308,281 @@ defmodule EctoLibSql.PreparedStatementTest do end end + describe "statement reset and caching" do + test "reset statement for reuse without re-prepare", %{state: state} do + # Create logs table + {:ok, _query, _result, state} = + exec_sql(state, "CREATE TABLE logs (id INTEGER PRIMARY KEY AUTOINCREMENT, message TEXT)") + + # Prepare statement once + {:ok, stmt_id} = Native.prepare(state, "INSERT INTO logs (message) VALUES (?)") + + # Execute multiple times - statement caching handles reset automatically + for i <- 1..5 do + {:ok, _rows} = + Native.execute_stmt( + state, + stmt_id, + "INSERT INTO logs (message) VALUES (?)", + ["Log #{i}"] + ) + end + + # Verify all inserts succeeded + {:ok, _query, result, _state} = exec_sql(state, "SELECT COUNT(*) FROM logs") + assert [[5]] = result.rows + + # Cleanup + Native.close_stmt(stmt_id) + end + + test "reset clears parameter bindings", %{state: state} do + {:ok, stmt_id} = Native.prepare(state, "INSERT INTO users VALUES (?, ?, ?)") + + # Execute with parameters - automatic reset between calls + {:ok, _} = + Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ + 1, + "Alice", + "alice@example.com" + ]) + + # Execute with different parameters - no manual reset needed + {:ok, _} = + Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ + 2, + "Bob", + "bob@example.com" + 
]) + + # Verify both inserts + {:ok, _query, result, _state} = exec_sql(state, "SELECT name FROM users ORDER BY id") + assert [["Alice"], ["Bob"]] = result.rows + + Native.close_stmt(stmt_id) + end + end + + describe "statement reset - explicit reset" do + test "reset_stmt clears statement state explicitly", %{state: state} do + {:ok, stmt_id} = Native.prepare(state, "INSERT INTO users VALUES (?, ?, ?)") + + # Execute first insertion + {:ok, _} = + Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ + 1, + "Alice", + "alice@example.com" + ]) + + # Explicitly reset the statement + assert :ok = Native.reset_stmt(state, stmt_id) + + # Execute second insertion after reset + {:ok, _} = + Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ + 2, + "Bob", + "bob@example.com" + ]) + + # Verify both inserts succeeded + {:ok, _query, result, _state} = exec_sql(state, "SELECT name FROM users ORDER BY id") + assert [["Alice"], ["Bob"]] = result.rows + + Native.close_stmt(stmt_id) + end + + test "reset_stmt can be called multiple times", %{state: state} do + {:ok, stmt_id} = Native.prepare(state, "INSERT INTO users VALUES (?, ?, ?)") + + # Execute and reset multiple times + for i <- 1..5 do + {:ok, _} = + Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ + i, + "User#{i}", + "user#{i}@example.com" + ]) + + # Explicit reset + assert :ok = Native.reset_stmt(state, stmt_id) + end + + # Verify all inserts + {:ok, _query, result, _state} = exec_sql(state, "SELECT COUNT(*) FROM users") + assert [[5]] = result.rows + + Native.close_stmt(stmt_id) + end + + test "reset_stmt returns error for invalid statement", %{state: state} do + # Try to reset non-existent statement + assert {:error, _reason} = Native.reset_stmt(state, "invalid_stmt_id") + end + end + + describe "statement get_stmt_columns - full metadata" do + test "get_stmt_columns returns column metadata", %{state: state} do + {:ok, stmt_id} = 
Native.prepare(state, "SELECT * FROM users WHERE id = ?") + + # Get full column metadata + {:ok, columns} = Native.get_stmt_columns(state, stmt_id) + + # Should return list of tuples: {name, origin_name, decl_type} + assert is_list(columns) + assert length(columns) == 3 + + # Verify column metadata structure + [ + {col1_name, col1_origin, col1_type}, + {col2_name, col2_origin, col2_type}, + {col3_name, col3_origin, col3_type} + ] = columns + + # Check column 1 (id) + assert col1_name == "id" + assert col1_origin == "id" + assert col1_type == "INTEGER" + + # Check column 2 (name) + assert col2_name == "name" + assert col2_origin == "name" + assert col2_type == "TEXT" + + # Check column 3 (email) + assert col3_name == "email" + assert col3_origin == "email" + assert col3_type == "TEXT" + + Native.close_stmt(stmt_id) + end + + test "get_stmt_columns works with aliased columns", %{state: state} do + {:ok, stmt_id} = + Native.prepare( + state, + "SELECT id as user_id, name as full_name, email as mail FROM users" + ) + + {:ok, columns} = Native.get_stmt_columns(state, stmt_id) + + assert length(columns) == 3 + + # Check aliased column names + [{col1_name, _, _}, {col2_name, _, _}, {col3_name, _, _}] = columns + + assert col1_name == "user_id" + assert col2_name == "full_name" + assert col3_name == "mail" + + Native.close_stmt(stmt_id) + end + + test "get_stmt_columns works with expressions", %{state: state} do + {:ok, stmt_id} = + Native.prepare( + state, + "SELECT COUNT(*) as total, MAX(id) as max_id FROM users" + ) + + {:ok, columns} = Native.get_stmt_columns(state, stmt_id) + + assert length(columns) == 2 + + [{col1_name, _, _}, {col2_name, _, _}] = columns + + assert col1_name == "total" + assert col2_name == "max_id" + + Native.close_stmt(stmt_id) + end + + test "get_stmt_columns returns error for invalid statement", %{state: state} do + # Try to get columns for non-existent statement + assert {:error, _reason} = Native.get_stmt_columns(state, "invalid_stmt_id") + 
end + end + + describe "statement parameter introspection" do + test "parameter_count with named parameters", %{state: state} do + # Test with colon-style named parameters (:name) + {:ok, stmt_id} = + Native.prepare( + state, + "INSERT INTO users (id, name, email) VALUES (:id, :name, :email)" + ) + + # Get parameter names (note: SQLite uses 1-based indexing) + {:ok, param1} = Native.stmt_parameter_name(state, stmt_id, 1) + assert param1 == ":id" + + {:ok, param2} = Native.stmt_parameter_name(state, stmt_id, 2) + assert param2 == ":name" + + {:ok, param3} = Native.stmt_parameter_name(state, stmt_id, 3) + assert param3 == ":email" + + Native.close_stmt(stmt_id) + end + + test "parameter_name returns nil for positional parameters", %{state: state} do + {:ok, stmt_id} = + Native.prepare(state, "SELECT * FROM users WHERE name = ? AND email = ?") + + # Positional parameters should return nil + {:ok, param1} = Native.stmt_parameter_name(state, stmt_id, 1) + assert param1 == nil + + {:ok, param2} = Native.stmt_parameter_name(state, stmt_id, 2) + assert param2 == nil + + Native.close_stmt(stmt_id) + end + + test "parameter_name supports dollar-style parameters", %{state: state} do + # Test with dollar-style named parameters ($name) + {:ok, stmt_id} = + Native.prepare(state, "SELECT * FROM users WHERE id = $id AND name = $name") + + {:ok, param1} = Native.stmt_parameter_name(state, stmt_id, 1) + assert param1 == "$id" + + {:ok, param2} = Native.stmt_parameter_name(state, stmt_id, 2) + assert param2 == "$name" + + Native.close_stmt(stmt_id) + end + + test "parameter_name supports at-style parameters", %{state: state} do + # Test with at-style named parameters (@name) + {:ok, stmt_id} = + Native.prepare(state, "SELECT * FROM users WHERE id = @id AND name = @name") + + {:ok, param1} = Native.stmt_parameter_name(state, stmt_id, 1) + assert param1 == "@id" + + {:ok, param2} = Native.stmt_parameter_name(state, stmt_id, 2) + assert param2 == "@name" + + Native.close_stmt(stmt_id) + 
end + + test "parameter_name handles mixed positional and named parameters", %{state: state} do + # SQLite allows mixing positional and named parameters + {:ok, stmt_id} = + Native.prepare(state, "SELECT * FROM users WHERE id = :id AND name = ?") + + {:ok, param1} = Native.stmt_parameter_name(state, stmt_id, 1) + assert param1 == ":id" + + {:ok, param2} = Native.stmt_parameter_name(state, stmt_id, 2) + assert param2 == nil + + Native.close_stmt(stmt_id) + end + end + describe "statement binding behaviour (ported from ecto_sql)" do test "prepared statement auto-reset of bindings between executions", %{state: state} do # Source: ecto_sql prepared statement tests @@ -456,9 +729,246 @@ defmodule EctoLibSql.PreparedStatementTest do end) # No assertions on memory (platform-dependent) - # This test documents expected behavior and can catch memory leaks in manual testing + # This test documents expected behaviour and can catch memory leaks in manual testing :ok = Native.close_stmt(stmt_id) end end + + describe "concurrent prepared statement usage" do + test "multiple processes can use different prepared statements concurrently", %{ + state: state + } do + # Setup: Insert test data + Enum.each(1..10, fn i -> + {:ok, _query, _result, _} = + exec_sql(state, "INSERT INTO users (id, name, email) VALUES (?, ?, ?)", [ + i, + "User#{i}", + "user#{i}@example.com" + ]) + end) + + # Prepare multiple statements + {:ok, stmt_select_id} = Native.prepare(state, "SELECT * FROM users WHERE id = ?") + {:ok, stmt_select_name} = Native.prepare(state, "SELECT * FROM users WHERE name = ?") + + # Create multiple tasks executing different prepared statements concurrently + tasks = + Enum.map(1..5, fn i -> + Task.async(fn -> + # Each task executes SELECT by ID + {:ok, result_id} = Native.query_stmt(state, stmt_select_id, [i]) + assert length(result_id.rows) == 1 + + # Each task executes SELECT by name + {:ok, result_name} = Native.query_stmt(state, stmt_select_name, ["User#{i}"]) + assert 
length(result_name.rows) == 1 + + # Verify both queries return same data + assert hd(result_id.rows) == hd(result_name.rows) + + :ok + end) + end) + + # Wait for all tasks to complete successfully + results = Task.await_many(tasks, 5000) + assert Enum.all?(results, &(&1 == :ok)) + + # Cleanup + Native.close_stmt(stmt_select_id) + Native.close_stmt(stmt_select_name) + end + + test "single prepared statement can be safely used by multiple processes", %{state: state} do + # Setup: Insert test data + Enum.each(1..20, fn i -> + {:ok, _query, _result, _} = + exec_sql(state, "INSERT INTO users (id, name, email) VALUES (?, ?, ?)", [ + i, + "User#{i}", + "user#{i}@example.com" + ]) + end) + + # Prepare a single statement to be shared across tasks + {:ok, stmt_id} = Native.prepare(state, "SELECT * FROM users WHERE id = ?") + + # Create multiple concurrent tasks using the same prepared statement + tasks = + Enum.map(1..10, fn task_num -> + Task.async(fn -> + # Each task queries a different ID with the same prepared statement + {:ok, result} = Native.query_stmt(state, stmt_id, [task_num]) + assert length(result.rows) == 1 + + [id, name, email] = hd(result.rows) + assert id == task_num + assert name == "User#{task_num}" + assert String.contains?(email, "@example.com") + + # Simulate some work + Process.sleep(10) + + :ok + end) + end) + + # Wait for all tasks to complete successfully + results = Task.await_many(tasks, 5000) + assert Enum.all?(results, &(&1 == :ok)) + + # Verify data integrity - statement should work correctly after concurrent access + {:ok, final_result} = Native.query_stmt(state, stmt_id, [5]) + assert hd(final_result.rows) == [5, "User5", "user5@example.com"] + + # Cleanup + Native.close_stmt(stmt_id) + end + + test "concurrent writes with prepared statements maintain consistency", %{state: state} do + # Setup: Create initial user + {:ok, _query, _result, _} = + exec_sql(state, "INSERT INTO users (id, name, email) VALUES (?, ?, ?)", [ + 1, + "Initial", + 
"initial@example.com" + ]) + + # Prepare statements for reading and writing + {:ok, stmt_select} = Native.prepare(state, "SELECT COUNT(*) FROM users") + + {:ok, stmt_insert} = + Native.prepare(state, "INSERT INTO users (id, name, email) VALUES (?, ?, ?)") + + # Create tasks that concurrently write data + tasks = + Enum.map(2..6, fn user_id -> + Task.async(fn -> + # Each task inserts a new user using the prepared statement + {:ok, _rows} = + Native.execute_stmt( + state, + stmt_insert, + "INSERT INTO users (id, name, email) VALUES (?, ?, ?)", + [user_id, "User#{user_id}", "user#{user_id}@example.com"] + ) + + :ok + end) + end) + + # Wait for all writes to complete + Task.await_many(tasks, 5000) + + # Verify final count (initial + 5 new users) + {:ok, count_result} = Native.query_stmt(state, stmt_select, []) + assert hd(hd(count_result.rows)) == 6 + + # Cleanup + Native.close_stmt(stmt_select) + Native.close_stmt(stmt_insert) + end + + test "prepared statements handle parameter isolation across concurrent tasks", %{ + state: state + } do + # Setup: Create test data + Enum.each(1..5, fn i -> + {:ok, _query, _result, _} = + exec_sql(state, "INSERT INTO users (id, name, email) VALUES (?, ?, ?)", [ + i, + "User#{i}", + "user#{i}@example.com" + ]) + end) + + {:ok, stmt_id} = Native.prepare(state, "SELECT ? 
as param_test, id FROM users WHERE id = ?") + + # Create tasks with different parameter combinations + tasks = + Enum.map(1..5, fn task_id -> + Task.async(fn -> + # Each task uses different parameters + {:ok, result} = Native.query_stmt(state, stmt_id, ["Task#{task_id}", task_id]) + assert length(result.rows) == 1 + + [param_value, id] = hd(result.rows) + # Verify the parameter was not contaminated from another task + assert param_value == "Task#{task_id}", + "Parameter #{param_value} should be Task#{task_id}" + + assert id == task_id + + :ok + end) + end) + + # Wait for all tasks to complete successfully + results = Task.await_many(tasks, 5000) + assert Enum.all?(results, &(&1 == :ok)) + + # Cleanup + Native.close_stmt(stmt_id) + end + + test "prepared statements maintain isolation when reset concurrently", %{state: state} do + # Setup: Create test data (IDs 1-10) + Enum.each(1..10, fn i -> + {:ok, _query, _result, _} = + exec_sql(state, "INSERT INTO users (id, name, email) VALUES (?, ?, ?)", [ + i, + "User#{i}", + "user#{i}@example.com" + ]) + end) + + {:ok, stmt_id} = Native.prepare(state, "SELECT * FROM users WHERE id = ?") + + # Create multiple tasks that will reset the statement concurrently + tasks = + Enum.map(1..5, fn task_num -> + Task.async(fn -> + # Each task executes and resets the statement + {:ok, result} = Native.query_stmt(state, stmt_id, [task_num]) + assert length(result.rows) == 1 + + [id, name, email] = hd(result.rows) + assert id == task_num + assert name == "User#{task_num}" + assert email == "user#{task_num}@example.com" + + # Explicitly reset statement to clear bindings + :ok = Native.reset_stmt(state, stmt_id) + + # Execute again after reset - should query IDs 6-10 + {:ok, result2} = Native.query_stmt(state, stmt_id, [task_num + 5]) + + # After reset, prepared statement must return the correct row + assert length(result2.rows) == 1, "Should get exactly one row after reset" + + [new_id, new_name, new_email] = hd(result2.rows) + + assert 
new_id == task_num + 5, + "ID should be #{task_num + 5}, got #{new_id}" + + assert new_name == "User#{task_num + 5}", + "Name should be User#{task_num + 5}, got #{new_name}" + + assert new_email == "user#{task_num + 5}@example.com", + "Email should be user#{task_num + 5}@example.com, got #{new_email}" + + :ok + end) + end) + + # Wait for all tasks to complete successfully + results = Task.await_many(tasks, 5000) + assert Enum.all?(results, &(&1 == :ok)) + + # Cleanup + Native.close_stmt(stmt_id) + end + end end diff --git a/test/replication_integration_test.exs b/test/replication_integration_test.exs index 8b16dab..fee4b75 100644 --- a/test/replication_integration_test.exs +++ b/test/replication_integration_test.exs @@ -52,9 +52,7 @@ defmodule EctoLibSql.ReplicationIntegrationTest do on_exit(fn -> EctoLibSql.disconnect([], state) - File.rm(test_db) - File.rm(test_db <> "-shm") - File.rm(test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(test_db) end) {:ok, state: state} diff --git a/test/rtree_test.exs b/test/rtree_test.exs index eb45c5a..f50b8f9 100644 --- a/test/rtree_test.exs +++ b/test/rtree_test.exs @@ -12,9 +12,7 @@ defmodule Ecto.RTreeTest do setup_all do # Clean up any existing test database files - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) # Start the test repo {:ok, _} = TestRepo.start_link(database: @test_db) @@ -27,9 +25,7 @@ defmodule Ecto.RTreeTest do _, _ -> nil end - File.rm(@test_db) - File.rm(@test_db <> "-shm") - File.rm(@test_db <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) :ok diff --git a/test/savepoint_replication_test.exs b/test/savepoint_replication_test.exs new file mode 100644 index 0000000..d0342a3 --- /dev/null +++ b/test/savepoint_replication_test.exs @@ -0,0 +1,277 @@ +defmodule EctoLibSql.SavepointReplicationTest do + @moduledoc """ + Tests for savepoint behaviour when used with replication/remote sync. 
+ + Focused on critical integration scenarios: + 1. Savepoints work correctly in replica mode with sync enabled + 2. Savepoint rollback doesn't interfere with remote sync + 3. Error recovery with savepoints in replicated transactions + + These tests require TURSO_DB_URI and TURSO_AUTH_TOKEN for remote testing. + Tests are skipped if credentials are not provided. + """ + use ExUnit.Case + + @turso_uri System.get_env("TURSO_DB_URI") + @turso_token System.get_env("TURSO_AUTH_TOKEN") + + # Skip tests if Turso credentials aren't provided + @moduletag skip: is_nil(@turso_uri) || is_nil(@turso_token) + + setup do + unique_id = :erlang.unique_integer([:positive]) + test_db = "z_ecto_libsql_test-savepoint_replication_#{unique_id}.db" + test_table = "test_users_#{unique_id}" + + {:ok, state} = + if not (is_nil(@turso_uri) || is_nil(@turso_token)) do + # Connect with replica mode for replication + EctoLibSql.connect( + database: test_db, + uri: @turso_uri, + auth_token: @turso_token, + sync: true + ) + else + # Fallback to local (tests will skip) + EctoLibSql.connect(database: test_db) + end + + # Create unique test table for this test + {:ok, _query, _result, state} = + EctoLibSql.handle_execute( + "CREATE TABLE #{test_table} (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)", + [], + [], + state + ) + + on_exit(fn -> + # Cleanup: drop remote table, disconnect, and remove local files + # Errors are ignored to ensure cleanup never blocks + for cleanup_fn <- [ + fn -> + EctoLibSql.handle_execute("DROP TABLE IF EXISTS #{test_table}", [], [], state) + end, + fn -> EctoLibSql.disconnect([], state) end, + fn -> EctoLibSql.TestHelpers.cleanup_db_files(test_db) end + ] do + try do + cleanup_fn.() + rescue + _ -> :ok + end + end + end) + + {:ok, state: state, table: test_table} + end + + describe "savepoints in replica mode with sync" do + test "basic savepoint operation works with replica sync enabled", %{ + state: state, + table: table + } do + {:ok, trx_state} = 
EctoLibSql.Native.begin(state) + + # Create savepoint + :ok = EctoLibSql.Native.create_savepoint(trx_state, "sp1") + + # Execute within savepoint + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (name) VALUES (?)", + ["Alice"], + [], + trx_state + ) + + # Release and commit (which syncs to remote) + :ok = EctoLibSql.Native.release_savepoint_by_name(trx_state, "sp1") + {:ok, committed_state} = EctoLibSql.Native.commit(trx_state) + + # Verify sync occurred by checking replication frame number advanced + {:ok, frame_number} = EctoLibSql.Native.max_write_replication_index(committed_state) + assert is_integer(frame_number) && frame_number > 0 + + # Verify data persisted locally + {:ok, _query, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM #{table}", + [], + [], + state + ) + + assert [[1]] = result.rows + end + + test "savepoint rollback with remote sync preserves outer transaction", %{ + state: state, + table: table + } do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + # Outer transaction: insert Alice + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (name) VALUES (?)", + ["Alice"], + [], + trx_state + ) + + # Savepoint: insert Bob and rollback + :ok = EctoLibSql.Native.create_savepoint(trx_state, "sp1") + + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (name) VALUES (?)", + ["Bob"], + [], + trx_state + ) + + :ok = EctoLibSql.Native.rollback_to_savepoint_by_name(trx_state, "sp1") + + # Commit (syncs to remote) + {:ok, committed_state} = EctoLibSql.Native.commit(trx_state) + + # Verify sync occurred + {:ok, frame_number} = EctoLibSql.Native.max_write_replication_index(committed_state) + assert is_integer(frame_number) && frame_number > 0 + + # Only Alice should exist + {:ok, _query, result, _state} = + EctoLibSql.handle_execute( + "SELECT name FROM #{table} ORDER BY name", + [], + [], + state + ) + + assert 
result.rows == [["Alice"]] + end + + test "nested savepoints work correctly with remote sync", %{state: state, table: table} do + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + # Level 0: Insert Alice + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (name) VALUES (?)", + ["Alice"], + [], + trx_state + ) + + # Level 1: Savepoint sp1 + :ok = EctoLibSql.Native.create_savepoint(trx_state, "sp1") + + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (name) VALUES (?)", + ["Bob"], + [], + trx_state + ) + + # Level 2: Savepoint sp2 + :ok = EctoLibSql.Native.create_savepoint(trx_state, "sp2") + + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (name) VALUES (?)", + ["Charlie"], + [], + trx_state + ) + + # Rollback sp2 (removes Charlie, keeps Alice and Bob) + :ok = EctoLibSql.Native.rollback_to_savepoint_by_name(trx_state, "sp2") + + # Commit (syncs to remote) + {:ok, committed_state} = EctoLibSql.Native.commit(trx_state) + + # Verify sync occurred + {:ok, frame_number} = EctoLibSql.Native.max_write_replication_index(committed_state) + assert is_integer(frame_number) && frame_number > 0 + + # Alice and Bob should exist + {:ok, _query, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM #{table}", + [], + [], + state + ) + + assert [[2]] = result.rows + end + end + + describe "savepoint error recovery with remote sync" do + test "savepoint enables error recovery in replicated transactions", %{ + state: state, + table: table + } do + # Insert a row with specific ID for constraint violation test + {:ok, _query, _result, state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (id, name) VALUES (?, ?)", + [100, "PreExisting"], + [], + state + ) + + # Start transaction with savepoint + {:ok, trx_state} = EctoLibSql.Native.begin(state) + + :ok = EctoLibSql.Native.create_savepoint(trx_state, "sp1") + + # Try to insert duplicate 
(will fail with PRIMARY KEY constraint violation) + result = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (id, name) VALUES (?, ?)", + [100, "Duplicate"], + [], + trx_state + ) + + # Rebind trx_state - error tuple contains updated transaction state needed for recovery + # Assert the error is specifically a constraint violation (UNIQUE or PRIMARY KEY) + assert {:error, reason, trx_state} = result + assert reason =~ "UNIQUE constraint failed" || reason =~ "PRIMARY KEY" + + # Rollback savepoint to recover + :ok = EctoLibSql.Native.rollback_to_savepoint_by_name(trx_state, "sp1") + + # Insert different row + {:ok, _query, _result, trx_state} = + EctoLibSql.handle_execute( + "INSERT INTO #{table} (name) VALUES (?)", + ["NewRow"], + [], + trx_state + ) + + # Commit (syncs to remote) + {:ok, committed_state} = EctoLibSql.Native.commit(trx_state) + + # Verify sync occurred + {:ok, frame_number} = EctoLibSql.Native.max_write_replication_index(committed_state) + assert is_integer(frame_number) && frame_number > 0 + + # Both original and new should exist + {:ok, _query, result, _state} = + EctoLibSql.handle_execute( + "SELECT COUNT(*) FROM #{table}", + [], + [], + state + ) + + assert [[2]] = result.rows + end + end +end diff --git a/test/savepoint_test.exs b/test/savepoint_test.exs index 57abfe5..fb80797 100644 --- a/test/savepoint_test.exs +++ b/test/savepoint_test.exs @@ -35,9 +35,7 @@ defmodule EctoLibSql.SavepointTest do on_exit(fn -> Native.close(state.conn_id, :conn_id) - File.rm(db_file) - File.rm(db_file <> "-shm") - File.rm(db_file <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(db_file) end) {:ok, state: state} @@ -341,6 +339,7 @@ defmodule EctoLibSql.SavepointTest do "Bob" ]) + # Discard error state - next operation uses original trx_state assert {:error, _reason, _state} = result # Rollback savepoint to recover diff --git a/test/security_test.exs b/test/security_test.exs index a0f4cb0..91dcdd8 100644 --- a/test/security_test.exs +++ 
b/test/security_test.exs @@ -3,10 +3,7 @@ defmodule EctoLibSql.SecurityTest do # Helper to clean up database files and associated WAL/SHM files. defp cleanup_db(db_path) do - File.rm(db_path) - File.rm(db_path <> "-wal") - File.rm(db_path <> "-shm") - File.rm(db_path <> "-journal") + EctoLibSql.TestHelpers.cleanup_db_files(db_path) end describe "Transaction Isolation ✅" do diff --git a/test/smoke_test.exs b/test/smoke_test.exs new file mode 100644 index 0000000..08a85cb --- /dev/null +++ b/test/smoke_test.exs @@ -0,0 +1,166 @@ +defmodule EctoLibSqlSmokeTest do + @moduledoc """ + Basic smoke tests for EctoLibSql. + + These are minimal sanity checks to verify core functionality works. + More comprehensive tests are in specialised test files: + - prepared_statement_test.exs - Prepared statements + - vector_geospatial_test.exs - Vector and R*Tree features + - savepoint_test.exs - Transactions and savepoints + - ecto_migration_test.exs - Migrations + """ + use ExUnit.Case + doctest EctoLibSql + + setup_all do + # Clean up any existing test database from previous runs + EctoLibSql.TestHelpers.cleanup_db_files("z_ecto_libsql_test-smoke.db") + + on_exit(fn -> + # Clean up at end of all tests too + EctoLibSql.TestHelpers.cleanup_db_files("z_ecto_libsql_test-smoke.db") + end) + + :ok + end + + setup do + # Create a unique database file for each test to ensure isolation + test_db = "z_ecto_libsql_test-#{:erlang.unique_integer([:positive])}.db" + + opts = [ + uri: System.get_env("LIBSQL_URI"), + auth_token: System.get_env("LIBSQL_TOKEN"), + database: test_db, + sync: true + ] + + # Clean up database file after test completes + on_exit(fn -> + EctoLibSql.TestHelpers.cleanup_db_files(test_db) + end) + + {:ok, opts: opts} + end + + describe "basic connectivity" do + test "can connect to database", state do + assert {:ok, _conn} = EctoLibSql.connect(state[:opts]) + end + + test "can ping connection", state do + {:ok, conn} = EctoLibSql.connect(state[:opts]) + assert {:ok, _conn} =
EctoLibSql.ping(conn) + end + + test "can disconnect", state do + {:ok, conn} = EctoLibSql.connect(state[:opts]) + assert :ok = EctoLibSql.disconnect([], conn) + end + end + + describe "basic queries" do + test "can execute a simple select", state do + {:ok, conn} = EctoLibSql.connect(state[:opts]) + query = %EctoLibSql.Query{statement: "SELECT 1 + 1"} + assert {:ok, _query, _result, _conn} = EctoLibSql.handle_execute(query, [], [], conn) + end + + test "handles invalid SQL with error", state do + {:ok, conn} = EctoLibSql.connect(state[:opts]) + query = %EctoLibSql.Query{statement: "SELECT * FROM not_existing_table"} + + assert {:error, %EctoLibSql.Error{}, _conn} = + EctoLibSql.handle_execute(query, [], [], conn) + end + + test "can execute multiple statements", state do + {:ok, conn} = EctoLibSql.connect(state[:opts]) + + # Create table first + create_table = %EctoLibSql.Query{ + statement: + "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, email TEXT)" + } + + {:ok, _query, _result, conn} = EctoLibSql.handle_execute(create_table, [], [], conn) + + # Multiple statements in one execution + multi_stmt = %EctoLibSql.Query{ + statement: """ + INSERT INTO users (name, email) VALUES ('test', 'test@mail.com'); + SELECT * FROM users WHERE name = 'test'; + """ + } + + assert {:ok, _query, _result, _conn} = EctoLibSql.handle_execute(multi_stmt, [], [], conn) + end + end + + describe "basic transaction" do + test "can begin, execute, and commit", state do + {:ok, conn} = EctoLibSql.connect(state[:opts]) + + # Create table first + create = %EctoLibSql.Query{ + statement: + "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, email TEXT)" + } + + {:ok, _query, _result, conn} = EctoLibSql.handle_execute(create, [], [], conn) + + # Begin transaction + {:ok, _begin_result, conn} = EctoLibSql.handle_begin([], conn) + + # Insert data + insert = %EctoLibSql.Query{statement: "INSERT INTO users (name, email) VALUES (?, 
?)"} + + {:ok, _query, _result, conn} = + EctoLibSql.handle_execute(insert, ["Alice", "alice@example.com"], [], conn) + + # Commit + assert {:ok, _commit_result, _conn} = EctoLibSql.handle_commit([], conn) + end + + test "can begin, execute, and rollback", state do + {:ok, conn} = EctoLibSql.connect(state[:opts]) + + # Create table first + create = %EctoLibSql.Query{ + statement: + "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, email TEXT)" + } + + {:ok, _query, _result, conn} = EctoLibSql.handle_execute(create, [], [], conn) + + # Insert initial data to verify rollback doesn't affect pre-transaction data + insert_initial = %EctoLibSql.Query{ + statement: "INSERT INTO users (name, email) VALUES (?, ?)" + } + + {:ok, _query, _result, conn} = + EctoLibSql.handle_execute(insert_initial, ["Bob", "bob@example.com"], [], conn) + + # Begin transaction + {:ok, _begin_result, conn} = EctoLibSql.handle_begin([], conn) + + # Insert data in transaction + insert_txn = %EctoLibSql.Query{ + statement: "INSERT INTO users (name, email) VALUES (?, ?)" + } + + {:ok, _query, _result, conn} = + EctoLibSql.handle_execute(insert_txn, ["Charlie", "charlie@example.com"], [], conn) + + # Rollback transaction + {:ok, _rollback_result, conn} = EctoLibSql.handle_rollback([], conn) + + # Verify only initial data exists (rollback worked) + select = %EctoLibSql.Query{statement: "SELECT COUNT(*) FROM users"} + {:ok, _query, result, _conn} = EctoLibSql.handle_execute(select, [], [], conn) + + # Should have only 1 row (Bob), not 2 (Bob and Charlie) + assert [[1]] = result.rows + end + end +end diff --git a/test/statement_features_test.exs b/test/statement_features_test.exs deleted file mode 100644 index 95a724d..0000000 --- a/test/statement_features_test.exs +++ /dev/null @@ -1,836 +0,0 @@ -defmodule EctoLibSql.StatementFeaturesTest do - @moduledoc """ - Tests for prepared statement features. 
- - Includes: - - Basic prepare/execute - - Statement introspection: columns(), parameter_count() - - Statement reset() for reuse - """ - use ExUnit.Case - - setup do - test_db = "z_ecto_libsql_test-stmt_#{:erlang.unique_integer([:positive])}.db" - - {:ok, state} = EctoLibSql.connect(database: test_db) - - # Create a test table - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)", - [], - [], - state - ) - - on_exit(fn -> - EctoLibSql.disconnect([], state) - File.rm(test_db) - File.rm(test_db <> "-shm") - File.rm(test_db <> "-wal") - end) - - {:ok, state: state} - end - - describe "Statement.columns()" do - test "get column metadata from prepared statement", %{state: state} do - # Prepare statement - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE id = ?") - - # Get column count - {:ok, count} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - assert count == 3 - - # Get column names using helper function - names = get_column_names(state, stmt_id, count) - assert names == ["id", "name", "age"] - - # Cleanup - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "columns work with complex queries", %{state: state} do - # Create posts table - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE posts (id INTEGER PRIMARY KEY, user_id INTEGER, title TEXT)", - [], - [], - state - ) - - # Prepare complex query - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT - u.id as user_id, - u.name, - COUNT(p.id) as post_count - FROM users u - LEFT JOIN posts p ON u.id = p.user_id - GROUP BY u.id - """ - ) - - # Get column count - {:ok, count} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - assert count == 3 - - # Get column names using helper function - names = get_column_names(state, stmt_id, count) - assert names == ["user_id", "name", "post_count"] - - # Cleanup - EctoLibSql.Native.close_stmt(stmt_id) - end - - 
test "stmt_column_name handles out-of-bounds and valid indices", %{state: state} do - # Prepare statement - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE id = ?") - - # Get column count - {:ok, count} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - assert count == 3 - - # Valid indices (0 to count-1) should succeed - {:ok, name_0} = EctoLibSql.Native.stmt_column_name(state, stmt_id, 0) - assert name_0 == "id" - - {:ok, name_2} = EctoLibSql.Native.stmt_column_name(state, stmt_id, 2) - assert name_2 == "age" - - # Out-of-bounds indices should return error - assert {:error, _reason} = EctoLibSql.Native.stmt_column_name(state, stmt_id, count) - assert {:error, _reason} = EctoLibSql.Native.stmt_column_name(state, stmt_id, 100) - - # Cleanup - EctoLibSql.Native.close_stmt(stmt_id) - end - end - - # ============================================================================ - # NOTE: query_row() is NOT in the libsql Rust crate API - # It's an Elixir convenience function that doesn't exist upstream - # Users should use query_stmt() and take the first row if needed - # Removed to keep tests aligned with actual libsql features - # ============================================================================ - - # ============================================================================ - # Statement.reset() - NOT IMPLEMENTED ❌ - # ============================================================================ - - describe "Statement reset and caching ✅" do - test "reset statement for reuse without re-prepare", %{state: state} do - # Create logs table - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE logs (id INTEGER PRIMARY KEY AUTOINCREMENT, message TEXT)", - [], - [], - state - ) - - # Prepare statement once - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "INSERT INTO logs (message) VALUES (?)") - - # Execute multiple times - statement caching handles reset automatically - for i <- 1..5 do - {:ok, 
_rows} = - EctoLibSql.Native.execute_stmt( - state, - stmt_id, - "INSERT INTO logs (message) VALUES (?)", - ["Log #{i}"] - ) - end - - # Verify all inserts succeeded - {:ok, _query, result, _state} = - EctoLibSql.handle_execute("SELECT COUNT(*) FROM logs", [], [], state) - - assert [[5]] = result.rows - - # Cleanup - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "reset clears parameter bindings", %{state: state} do - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "INSERT INTO users VALUES (?, ?, ?)") - - # Execute with parameters - automatic reset between calls - {:ok, _} = - EctoLibSql.Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ - 1, - "Alice", - 30 - ]) - - # Execute with different parameters - no manual reset needed - {:ok, _} = - EctoLibSql.Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ - 2, - "Bob", - 25 - ]) - - # Verify both inserts - {:ok, _query, result, _state} = - EctoLibSql.handle_execute("SELECT name FROM users ORDER BY id", [], [], state) - - assert [["Alice"], ["Bob"]] = result.rows - - EctoLibSql.Native.close_stmt(stmt_id) - end - - @tag :flaky - test "statement caching improves performance vs re-prepare", %{state: state} do - sql = "INSERT INTO users VALUES (?, ?, ?)" - - # Time cached prepared statement (prepare once, execute many times) - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, sql) - - {time_with_cache, _} = - :timer.tc(fn -> - for i <- 1..100 do - EctoLibSql.Native.execute_stmt(state, stmt_id, sql, [i, "User#{i}", 20 + i]) - end - end) - - EctoLibSql.Native.close_stmt(stmt_id) - - # Verify all inserts succeeded - {:ok, _query, result, _state} = - EctoLibSql.handle_execute("SELECT COUNT(*) FROM users", [], [], state) - - assert [[100]] = result.rows - - # Clear for next benchmark - {:ok, _query, _result, state} = - EctoLibSql.handle_execute("DELETE FROM users", [], [], state) - - # Time re-prepare approach (prepare and close each time) - {time_with_prepare, _} = - 
:timer.tc(fn -> - for i <- 1..100 do - {:ok, stmt} = EctoLibSql.Native.prepare(state, sql) - EctoLibSql.Native.execute_stmt(state, stmt, sql, [i + 100, "User#{i}", 20 + i]) - EctoLibSql.Native.close_stmt(stmt) - end - end) - - # Caching should provide measurable benefit (at least not worse on average) - # Note: allowing significant variance for CI/test environments - # On GitHub Actions and other CI platforms, performance can vary wildly - ratio = time_with_cache / time_with_prepare - - # Very lenient threshold for CI environments - just verify caching doesn't - # make things dramatically worse (10x threshold instead of 2x) - assert ratio <= 10, - "Cached statements should not be dramatically slower than re-prepare (got #{ratio}x)" - end - end - - # ============================================================================ - # Statement.reset() - NEW IMPLEMENTATION ✅ - # ============================================================================ - - describe "Statement.reset() explicit reset ✅" do - test "reset_stmt clears statement state explicitly", %{state: state} do - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "INSERT INTO users VALUES (?, ?, ?)") - - # Execute first insertion - {:ok, _} = - EctoLibSql.Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ - 1, - "Alice", - 30 - ]) - - # Explicitly reset the statement - assert :ok = EctoLibSql.Native.reset_stmt(state, stmt_id) - - # Execute second insertion after reset - {:ok, _} = - EctoLibSql.Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ - 2, - "Bob", - 25 - ]) - - # Verify both inserts succeeded - {:ok, _query, result, _state} = - EctoLibSql.handle_execute("SELECT name FROM users ORDER BY id", [], [], state) - - assert [["Alice"], ["Bob"]] = result.rows - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "reset_stmt can be called multiple times", %{state: state} do - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "INSERT INTO users VALUES 
(?, ?, ?)") - - # Execute and reset multiple times - for i <- 1..5 do - {:ok, _} = - EctoLibSql.Native.execute_stmt(state, stmt_id, "INSERT INTO users VALUES (?, ?, ?)", [ - i, - "User#{i}", - 20 + i - ]) - - # Explicit reset - assert :ok = EctoLibSql.Native.reset_stmt(state, stmt_id) - end - - # Verify all inserts - {:ok, _query, result, _state} = - EctoLibSql.handle_execute("SELECT COUNT(*) FROM users", [], [], state) - - assert [[5]] = result.rows - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "reset_stmt returns error for invalid statement", %{state: state} do - # Try to reset non-existent statement - assert {:error, _reason} = EctoLibSql.Native.reset_stmt(state, "invalid_stmt_id") - end - end - - # ============================================================================ - # Statement.get_stmt_columns() - NEW IMPLEMENTATION ✅ - # ============================================================================ - - describe "Statement.get_stmt_columns() full metadata ✅" do - test "get_stmt_columns returns column metadata", %{state: state} do - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE id = ?") - - # Get full column metadata - {:ok, columns} = EctoLibSql.Native.get_stmt_columns(state, stmt_id) - - # Should return list of tuples: {name, origin_name, decl_type} - assert is_list(columns) - assert length(columns) == 3 - - # Verify column metadata structure - [ - {col1_name, col1_origin, col1_type}, - {col2_name, col2_origin, col2_type}, - {col3_name, col3_origin, col3_type} - ] = columns - - # Check column 1 (id) - assert col1_name == "id" - assert col1_origin == "id" - assert col1_type == "INTEGER" - - # Check column 2 (name) - assert col2_name == "name" - assert col2_origin == "name" - assert col2_type == "TEXT" - - # Check column 3 (age) - assert col3_name == "age" - assert col3_origin == "age" - assert col3_type == "INTEGER" - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "get_stmt_columns works with aliased 
columns", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - "SELECT id as user_id, name as full_name, age as years FROM users" - ) - - {:ok, columns} = EctoLibSql.Native.get_stmt_columns(state, stmt_id) - - assert length(columns) == 3 - - # Check aliased column names - [{col1_name, _, _}, {col2_name, _, _}, {col3_name, _, _}] = columns - - assert col1_name == "user_id" - assert col2_name == "full_name" - assert col3_name == "years" - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "get_stmt_columns works with expressions", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - "SELECT COUNT(*) as total, MAX(age) as oldest FROM users" - ) - - {:ok, columns} = EctoLibSql.Native.get_stmt_columns(state, stmt_id) - - assert length(columns) == 2 - - [{col1_name, _, _}, {col2_name, _, _}] = columns - - assert col1_name == "total" - assert col2_name == "oldest" - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "get_stmt_columns returns error for invalid statement", %{state: state} do - # Try to get columns for non-existent statement - assert {:error, _reason} = EctoLibSql.Native.get_stmt_columns(state, "invalid_stmt_id") - end - end - - # ============================================================================ - # Statement parameter introspection - NOT IMPLEMENTED ❌ - # ============================================================================ - - describe "Statement parameter introspection ✅" do - test "parameter_count returns number of parameters", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE name = ? 
AND age > ?") - - assert {:ok, 2} = EctoLibSql.Native.stmt_parameter_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_count returns 0 for statements with no parameters", %{state: state} do - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "SELECT * FROM users") - - assert {:ok, 0} = EctoLibSql.Native.stmt_parameter_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_count handles many parameters", %{state: state} do - # Create INSERT statement with 20 parameters - placeholders = Enum.map(1..20, fn _ -> "?" end) |> Enum.join(", ") - columns = Enum.map(1..20, fn i -> "col#{i}" end) |> Enum.join(", ") - - # Create table with 20 columns - create_sql = - "CREATE TABLE many_cols (#{Enum.map(1..20, fn i -> "col#{i} TEXT" end) |> Enum.join(", ")})" - - {:ok, _query, _result, state} = EctoLibSql.handle_execute(create_sql, [], [], state) - - # Prepare INSERT with 20 parameters - insert_sql = "INSERT INTO many_cols (#{columns}) VALUES (#{placeholders})" - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, insert_sql) - - assert {:ok, 20} = EctoLibSql.Native.stmt_parameter_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_count for UPDATE statements", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "UPDATE users SET name = ?, age = ? 
WHERE id = ?") - - assert {:ok, 3} = EctoLibSql.Native.stmt_parameter_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_count for complex nested queries", %{state: state} do - # Create posts table for JOIN query - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE posts (id INTEGER PRIMARY KEY, user_id INTEGER, title TEXT)", - [], - [], - state - ) - - # Complex query with multiple parameters in different parts - complex_sql = """ - SELECT u.name, COUNT(p.id) as post_count - FROM users u - LEFT JOIN posts p ON u.id = p.user_id - WHERE u.age > ? AND u.name LIKE ? - GROUP BY u.id - HAVING COUNT(p.id) >= ? - """ - - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, complex_sql) - - assert {:ok, 3} = EctoLibSql.Native.stmt_parameter_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_name introspection for named parameters", %{state: state} do - # Test with colon-style named parameters (:name) - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - "INSERT INTO users (id, name, age) VALUES (:id, :name, :age)" - ) - - # Get parameter names (note: SQLite uses 1-based indexing) - {:ok, param1} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 1) - assert param1 == ":id" - - {:ok, param2} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 2) - assert param2 == ":name" - - {:ok, param3} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 3) - assert param3 == ":age" - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_name returns nil for positional parameters", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE name = ? 
AND age = ?") - - # Positional parameters should return nil - {:ok, param1} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 1) - assert param1 == nil - - {:ok, param2} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 2) - assert param2 == nil - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_name supports dollar-style parameters", %{state: state} do - # Test with dollar-style named parameters ($name) - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE id = $id AND name = $name") - - {:ok, param1} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 1) - assert param1 == "$id" - - {:ok, param2} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 2) - assert param2 == "$name" - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_name supports at-style parameters", %{state: state} do - # Test with at-style named parameters (@name) - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE id = @id AND name = @name") - - {:ok, param1} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 1) - assert param1 == "@id" - - {:ok, param2} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 2) - assert param2 == "@name" - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "parameter_name handles mixed positional and named parameters", %{state: state} do - # SQLite allows mixing positional and named parameters - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "SELECT * FROM users WHERE id = :id AND age > ?") - - {:ok, param1} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 1) - assert param1 == ":id" - - {:ok, param2} = EctoLibSql.Native.stmt_parameter_name(state, stmt_id, 2) - assert param2 == nil - - EctoLibSql.Native.close_stmt(stmt_id) - end - end - - # ============================================================================ - # Column introspection edge cases - # ============================================================================ - - 
describe "Column introspection edge cases ✅" do - test "column count for SELECT *", %{state: state} do - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "SELECT * FROM users") - - # Should return 3 columns (id, name, age) - assert {:ok, 3} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column count for INSERT without RETURNING", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "INSERT INTO users VALUES (?, ?, ?)") - - # INSERT without RETURNING should return 0 columns - assert {:ok, 0} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column count for UPDATE without RETURNING", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare(state, "UPDATE users SET name = ? WHERE id = ?") - - # UPDATE without RETURNING should return 0 columns - assert {:ok, 0} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column count for DELETE without RETURNING", %{state: state} do - {:ok, stmt_id} = EctoLibSql.Native.prepare(state, "DELETE FROM users WHERE id = ?") - - # DELETE without RETURNING should return 0 columns - assert {:ok, 0} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column metadata for aggregate functions", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT - COUNT(*) as total, - AVG(age) as avg_age, - MIN(age) as min_age, - MAX(age) as max_age, - SUM(age) as sum_age - FROM users - """ - ) - - assert {:ok, 5} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - # Check column names - names = get_column_names(state, stmt_id, 5) - assert names == ["total", "avg_age", "min_age", "max_age", "sum_age"] - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column metadata for JOIN with multiple tables", %{state: state} do - # Create posts 
table - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - "CREATE TABLE posts (id INTEGER PRIMARY KEY, user_id INTEGER, title TEXT, content TEXT)", - [], - [], - state - ) - - # Complex JOIN query - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT - u.id, - u.name, - u.age, - p.id as post_id, - p.title, - p.content - FROM users u - INNER JOIN posts p ON u.id = p.user_id - """ - ) - - assert {:ok, 6} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - names = get_column_names(state, stmt_id, 6) - assert names == ["id", "name", "age", "post_id", "title", "content"] - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column metadata for subqueries", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT - name, - (SELECT COUNT(*) FROM users) as total_users - FROM users - WHERE id = ? - """ - ) - - assert {:ok, 2} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - names = get_column_names(state, stmt_id, 2) - assert names == ["name", "total_users"] - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column metadata for computed expressions", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT - id, - name, - age * 2 as double_age, - UPPER(name) as upper_name, - age + 10 as age_plus_ten - FROM users - """ - ) - - assert {:ok, 5} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - names = get_column_names(state, stmt_id, 5) - assert names == ["id", "name", "double_age", "upper_name", "age_plus_ten"] - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column metadata for all data types (INTEGER, TEXT, BLOB, REAL)", %{state: state} do - # Create table with all major data types - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - """ - CREATE TABLE data_types ( - id INTEGER PRIMARY KEY, - text_col TEXT, - blob_col BLOB, - real_col REAL, - numeric_col NUMERIC - ) - """, - [], - [], - state - ) - - {:ok, stmt_id} = 
EctoLibSql.Native.prepare(state, "SELECT * FROM data_types") - - assert {:ok, 5} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - # Get full metadata including types - {:ok, columns} = EctoLibSql.Native.get_stmt_columns(state, stmt_id) - - assert length(columns) == 5 - - # Verify column types - [ - {id_name, _, id_type}, - {text_name, _, text_type}, - {blob_name, _, blob_type}, - {real_name, _, real_type}, - {numeric_name, _, numeric_type} - ] = columns - - assert id_name == "id" - assert id_type == "INTEGER" - - assert text_name == "text_col" - assert text_type == "TEXT" - - assert blob_name == "blob_col" - assert blob_type == "BLOB" - - assert real_name == "real_col" - assert real_type == "REAL" - - assert numeric_name == "numeric_col" - assert numeric_type == "NUMERIC" - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column names for SELECT with implicit type conversion", %{state: state} do - # Test column introspection with type casting - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT - CAST(id AS TEXT) as id_text, - CAST(name AS BLOB) as name_blob, - CAST(age AS REAL) as age_real - FROM users - """ - ) - - assert {:ok, 3} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - names = get_column_names(state, stmt_id, 3) - assert names == ["id_text", "name_blob", "age_real"] - - EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column count for UNION queries", %{state: state} do - # Create another table for UNION test - {:ok, _query, _result, state} = - EctoLibSql.handle_execute( - """ - CREATE TABLE users_backup (id INTEGER PRIMARY KEY, name TEXT, age INTEGER) - """, - [], - [], - state - ) - - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT id, name, age FROM users - UNION - SELECT id, name, age FROM users_backup - """ - ) - - assert {:ok, 3} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - names = get_column_names(state, stmt_id, 3) - assert names == ["id", "name", "age"] - - 
EctoLibSql.Native.close_stmt(stmt_id) - end - - test "column count for CASE expressions", %{state: state} do - {:ok, stmt_id} = - EctoLibSql.Native.prepare( - state, - """ - SELECT - id, - CASE - WHEN age < 18 THEN 'minor' - WHEN age >= 65 THEN 'senior' - ELSE 'adult' - END as age_group - FROM users - """ - ) - - assert {:ok, 2} = EctoLibSql.Native.stmt_column_count(state, stmt_id) - - names = get_column_names(state, stmt_id, 2) - assert names == ["id", "age_group"] - - EctoLibSql.Native.close_stmt(stmt_id) - end - end - - # ============================================================================ - # Helper Functions - # ============================================================================ - - # Retrieve all column names from a prepared statement. - # This helper reduces duplication when working with multiple column names - # from the same statement. It iterates from 0 to count-1 and retrieves - # each column name using stmt_column_name/3. - defp get_column_names(state, stmt_id, count) do - for i <- 0..(count - 1) do - {:ok, name} = EctoLibSql.Native.stmt_column_name(state, stmt_id, i) - name - end - end -end diff --git a/test/statement_ownership_test.exs b/test/statement_ownership_test.exs index 9567c31..b42b4d1 100644 --- a/test/statement_ownership_test.exs +++ b/test/statement_ownership_test.exs @@ -20,12 +20,8 @@ defmodule EctoLibSql.StatementOwnershipTest do on_exit(fn -> Native.close(conn_id1, :conn_id) Native.close(conn_id2, :conn_id) - File.rm(db_file1) - File.rm(db_file1 <> "-shm") - File.rm(db_file1 <> "-wal") - File.rm(db_file2) - File.rm(db_file2 <> "-shm") - File.rm(db_file2 <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(db_file1) + EctoLibSql.TestHelpers.cleanup_db_files(db_file2) end) {:ok, state1: state1, state2: state2, conn_id1: conn_id1, conn_id2: conn_id2} diff --git a/test/stmt_caching_benchmark_test.exs b/test/stmt_caching_performance_test.exs similarity index 89% rename from test/stmt_caching_benchmark_test.exs rename to 
test/stmt_caching_performance_test.exs index f64a57b..bbe4da9 100644 --- a/test/stmt_caching_benchmark_test.exs +++ b/test/stmt_caching_performance_test.exs @@ -1,4 +1,11 @@ -defmodule EctoLibSql.StatementCachingBenchmarkTest do +defmodule EctoLibSql.StatementCachingPerformanceTest do + @moduledoc """ + Performance tests for prepared statement caching. + + These tests verify that prepared statements maintain good performance + characteristics when reused. The timing information is for visibility + and documentation - tests always pass regardless of timing. + """ use ExUnit.Case, async: false alias EctoLibSql.Native @@ -25,9 +32,7 @@ defmodule EctoLibSql.StatementCachingBenchmarkTest do on_exit(fn -> Native.close(state.conn_id, :conn_id) - File.rm(db_file) - File.rm(db_file <> "-shm") - File.rm(db_file <> "-wal") + EctoLibSql.TestHelpers.cleanup_db_files(db_file) end) {:ok, state: state} diff --git a/test/test_helper.exs b/test/test_helper.exs index cc2a8eb..09185ad 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -1,5 +1,8 @@ -# Exclude :ci_only tests when running locally -# These tests (like path traversal) are only run on CI by default +# Exclude various test categories based on environment +# - :ci_only tests (like path traversal) are only run on CI by default +# - :slow tests (like stress/load tests) are excluded by default to keep test runs fast +# - :flaky tests (like concurrency tests) are excluded by default to avoid CI brittleness +# - :sqlite_limitation tests are for PostgreSQL-only behaviour that doesn't work in SQLite ci? = case System.get_env("CI") do nil -> false @@ -8,14 +11,76 @@ ci? = exclude = if ci? do - # Running on CI (GitHub Actions, etc.) 
- skip flaky tests and known SQLite limitations + [flaky: true, sqlite_limitation: true] else - # Running locally - skip :ci_only tests - [ci_only: true] + # Running locally - skip :ci_only, :slow, :flaky tests, and SQLite limitations + [ci_only: true, slow: true, flaky: true, sqlite_limitation: true] end ExUnit.start(exclude: exclude) # Set logger level to :info to reduce debug output during tests Logger.configure(level: :info) + +defmodule EctoLibSql.TestHelpers do + @moduledoc """ + Shared helpers for EctoLibSql tests. + """ + + @doc """ + Cleans up all database-related files for a given database path. + + This removes the main database file and all associated files: + - `.db` - Main database file + - `.db-wal` - Write-Ahead Log file + - `.db-shm` - Shared memory file + - `.db-journal` - Journal file (rollback journal mode) + - `.db-info` - LibSQL/Turso replication info file + + ## Example + + on_exit(fn -> + EctoLibSql.TestHelpers.cleanup_db_files("test.db") + end) + """ + @spec cleanup_db_files(String.t()) :: :ok + def cleanup_db_files(db_path) when is_binary(db_path) do + files = [ + db_path, + db_path <> "-wal", + db_path <> "-shm", + db_path <> "-journal", + db_path <> "-info" + ] + + Enum.each(files, fn file -> + File.rm(file) + end) + + :ok + end + + @doc """ + Cleans up all database files matching a pattern using wildcard. + + Useful for cleaning up test databases with unique IDs in their names. 
+ + ## Example + + on_exit(fn -> + EctoLibSql.TestHelpers.cleanup_db_files_matching("z_ecto_libsql_test-*.db") + end) + """ + @spec cleanup_db_files_matching(String.t()) :: :ok + def cleanup_db_files_matching(pattern) when is_binary(pattern) do + Path.wildcard(pattern) + |> Enum.each(&cleanup_db_files/1) + + # Also clean up any orphaned auxiliary files + Path.wildcard(pattern <> "-*") + |> Enum.each(&File.rm/1) + + :ok + end +end diff --git a/test/turso_remote_test.exs b/test/turso_remote_test.exs index 5f3dd5e..ce1a3da 100644 --- a/test/turso_remote_test.exs +++ b/test/turso_remote_test.exs @@ -33,11 +33,7 @@ defmodule TursoRemoteTest do # Helper function to clean up local database files created by tests # SQLite creates multiple files: .db, .db-wal, .db-shm, and Turso creates .db-info defp cleanup_local_db(db_path) do - File.rm(db_path) - File.rm("#{db_path}-wal") - File.rm("#{db_path}-shm") - File.rm("#{db_path}-info") - :ok + EctoLibSql.TestHelpers.cleanup_db_files(db_path) end # Helper function to wait for replica sync to complete diff --git a/test/vector_geospatial_test.exs b/test/vector_geospatial_test.exs index 00b9efa..a81cf6f 100644 --- a/test/vector_geospatial_test.exs +++ b/test/vector_geospatial_test.exs @@ -54,9 +54,7 @@ defmodule Ecto.Vector.GeospatialTest do """) on_exit(fn -> - File.rm(@test_db) - File.rm(@test_db <> "-wal") - File.rm(@test_db <> "-shm") + EctoLibSql.TestHelpers.cleanup_db_files(@test_db) end) :ok