diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2c58261..bfaede5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,6 +33,7 @@ jobs: wasm-build/output/pglite.wasi core/src/main/resources/pgdata/ core/src/main/resources/pglite-files.txt + core/src/main/resources/pglite-dirs.txt build: needs: wasm-build diff --git a/.gitignore b/.gitignore index f4b07f0..8ffccc9 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,6 @@ core/src/main/resources/pglite/ core/src/main/resources/pgdata/ core/src/main/resources/* .idea + +# Backup zip files created by tests +memory:* diff --git a/README.md b/README.md index f56e9dc..02f6a25 100644 --- a/README.md +++ b/README.md @@ -53,39 +53,60 @@ Add the JDBC driver dependency: ### Plain JDBC ```java +// In-memory (ephemeral) — data is lost when the JVM exits Connection conn = DriverManager.getConnection("jdbc:pglite:memory://"); conn.createStatement().execute("CREATE TABLE demo (id serial PRIMARY KEY, name text)"); conn.createStatement().execute("INSERT INTO demo (name) VALUES ('hello')"); ``` +### Persistent storage + +Point the JDBC URL to a file path and `pglite4j` will periodically snapshot the entire in-memory database to a zip file on disk. On the next JVM startup, the database is restored from that snapshot. + +> **Note:** This is **not** traditional disk-backed storage. PostgreSQL runs entirely in memory (ZeroFS). The driver takes periodic snapshots (backup/restore), similar to Redis RDB persistence. Data written between the last snapshot and a crash will be lost. This is suitable for demo apps, prototyping, and development — not for production workloads that require durability guarantees. + +```java +// File-backed — data survives JVM restarts +Connection conn = DriverManager.getConnection("jdbc:pglite:/var/data/mydb.zip"); +``` + +The driver backs up the database on a fixed schedule (default: every 60 seconds) and writes a final snapshot on shutdown. 
You can configure the backup interval via a connection property: + +```java +Properties props = new Properties(); +props.setProperty("pgliteBackupIntervalSeconds", "30"); +Connection conn = DriverManager.getConnection("jdbc:pglite:/var/data/mydb.zip", props); +``` + +You can also use named in-memory databases for test isolation (separate PG instances, no persistence): + +```java +Connection db1 = DriverManager.getConnection("jdbc:pglite:memory:testA"); +Connection db2 = DriverManager.getConnection("jdbc:pglite:memory:testB"); +``` + ### Quarkus ```properties # application.properties quarkus.datasource.db-kind=postgresql quarkus.datasource.jdbc.url=jdbc:pglite:memory:// +# or persistent: jdbc:pglite:/var/data/myapp.zip quarkus.datasource.jdbc.driver=io.roastedroot.pglite4j.jdbc.PgLiteDriver -quarkus.datasource.username=postgres -quarkus.datasource.password=password -quarkus.datasource.jdbc.min-size=1 quarkus.datasource.jdbc.max-size=5 quarkus.devservices.enabled=false -quarkus.hibernate-orm.dialect=org.hibernate.dialect.PostgreSQLDialect -quarkus.hibernate-orm.unsupported-properties."hibernate.boot.allow_jdbc_metadata_access"=false ``` ### Spring Boot ```properties -# application-test.properties +# application.properties spring.datasource.url=jdbc:pglite:memory:// +# or persistent: jdbc:pglite:/var/data/myapp.zip spring.datasource.driver-class-name=io.roastedroot.pglite4j.jdbc.PgLiteDriver -spring.datasource.username=postgres -spring.datasource.password=password spring.datasource.hikari.maximum-pool-size=5 spring.jpa.hibernate.ddl-auto=create-drop spring.jpa.database-platform=org.hibernate.dialect.PostgreSQLDialect -spring.jpa.properties.hibernate.boot.allow_jdbc_metadata_access=false ``` ### HikariCP @@ -109,7 +130,7 @@ pglite4j/ ## Status and known limitations -- [x] ~~**Only `memory://` is supported**~~ — persistent / file-backed databases are not planned; the WASM backend uses an in-memory virtual filesystem (ZeroFS) with no disk I/O, which is fundamental to the 
architecture +- [x] ~~**Only `memory://` is supported**~~ — file-backed storage is now supported via periodic snapshots. The database runs entirely in memory; the driver takes a full snapshot (zip of pgdata) on a configurable schedule and on shutdown. On restart the snapshot is restored. This is backup/restore-style persistence (like Redis RDB), not write-ahead logging — data between the last snapshot and a crash is lost - [x] ~~**Single connection only**~~ — multiple JDBC connections are now supported per database instance; requests are serialized through a single PGLite backend via a lock, so connection pools with `max-size > 1` work correctly (queries execute one at a time, not in parallel) - [x] ~~**Error recovery**~~ — both simple and extended query protocol errors are handled correctly; PostgreSQL errors trap the WASM instance and are caught by the Java side, which resets the backend state and drains stale protocol buffers so subsequent queries work cleanly - [ ] **No connection isolation** — PostgreSQL runs in single-user mode with one session; all connections share the same session state (transactions, session variables). Queries are serialized, so there is no data corruption, but concurrent transactions are not isolated from each other. This is fine for connection pools that use connections sequentially (borrow, use, return). 
diff --git a/core/src/main/java/io/roastedroot/pglite4j/core/PGLite.java b/core/src/main/java/io/roastedroot/pglite4j/core/PGLite.java index 103dd3b..8d54999 100644 --- a/core/src/main/java/io/roastedroot/pglite4j/core/PGLite.java +++ b/core/src/main/java/io/roastedroot/pglite4j/core/PGLite.java @@ -12,11 +12,17 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; +import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.FileSystem; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.List; +import java.util.stream.Stream; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; +import java.util.zip.ZipOutputStream; @WasmModuleInterface(WasmResource.absoluteFile) public final class PGLite implements AutoCloseable { @@ -30,10 +36,12 @@ public final class PGLite implements AutoCloseable { private final WasiPreview1 wasi; private final PGLite_ModuleExports exports; private final FileSystem fs; + private final Path dataDir; private int bufferAddr; private int pendingWireLen; - private PGLite() { + private PGLite(Path dataDir) { + this.dataDir = dataDir; try { this.fs = ZeroFs.newFileSystem( @@ -42,6 +50,12 @@ private PGLite() { // Extract pgdata files into ZeroFS. // (share + lib are embedded in the WASM binary via wasi-vfs) extractDistToZeroFs(fs); + + // Restore saved pgdata from a previous session (overwrites defaults). + if (dataDir != null && java.nio.file.Files.exists(dataDir)) { + restoreDataDir(dataDir); + } + Path tmp = fs.getPath("/tmp"); Files.createDirectories(tmp); Path pgdata = fs.getPath("/pgdata"); @@ -89,8 +103,15 @@ private PGLite() { .build(); this.exports = new PGLite_ModuleExports(this.instance); - // pgl_initdb + pgl_backend already executed by wizer at build time. - // closeAllVfds() was called at end of wizer to prevent stale fd PANICs. 
+        if (dataDir != null && java.nio.file.Files.exists(dataDir)) {
+            // Restored pgdata differs from the wizer snapshot.
+            // The wizer snapshot's shared buffer pool has stale catalog
+            // pages from the clean template1 database; invalidate them
+            // so PostgreSQL re-reads from the restored ZeroFS files.
+            exports.pglInvalidateBuffers();
+        }
+
+        // pgl_initdb + pgl_backend already executed (by wizer or restart above).
         exports.interactiveWrite(0);
         int channel = exports.getChannel();
@@ -149,8 +170,61 @@
     public static Builder builder() {
         return new Builder();
     }
 
+    /**
+     * Snapshot the in-memory pgdata directory to a zip file on the host filesystem.
+     * Runs VACUUM FREEZE + CHECKPOINT first so that all tuples are frozen (visible
+     * after restore without CLOG) and all dirty buffers are flushed to ZeroFS files.
+     * Writes atomically via temp file + move.
+     */
+    public void dumpDataDir(Path target) throws IOException {
+        // Freeze all tuple xmin values so they survive restore without
+        // needing the CLOG (commit log) cache from the original session.
+        // Then CHECKPOINT to flush all dirty buffers to ZeroFS files.
+        execProtocolRaw(buildSimpleQuery("VACUUM FREEZE;"));
+        execProtocolRaw(buildSimpleQuery("CHECKPOINT;"));
+
+        Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
+        Path pgdataRoot = fs.getPath(PG_DATA);
+
+        try (ZipOutputStream zos = new ZipOutputStream(java.nio.file.Files.newOutputStream(tmp))) {
+            try (Stream<Path> walk = Files.walk(pgdataRoot)) {
+                walk.filter(Files::isRegularFile)
+                        .filter(p -> !p.getFileName().toString().startsWith(".s.PGSQL."))
+                        .forEach(
+                                p -> {
+                                    try {
+                                        String entryName = pgdataRoot.relativize(p).toString();
+                                        zos.putNextEntry(new ZipEntry(entryName));
+                                        Files.copy(p, zos);
+                                        zos.closeEntry();
+                                    } catch (IOException e) {
+                                        throw new RuntimeException("Failed to zip " + p, e);
+                                    }
+                                });
+            }
+        }
+
+        // Atomic move; fall back to plain replace if the filesystem doesn't support it.
+ try { + java.nio.file.Files.move( + tmp, + target, + StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + } catch (AtomicMoveNotSupportedException e) { + java.nio.file.Files.move(tmp, target, StandardCopyOption.REPLACE_EXISTING); + } + } + @Override public void close() { + if (dataDir != null) { + try { + dumpDataDir(dataDir); + } catch (IOException | RuntimeException e) { + // best-effort backup on close + } + } try { exports.pglShutdown(); } catch (RuntimeException e) { @@ -224,6 +298,19 @@ private boolean collectReply(List replies) { return false; } + private static byte[] buildSimpleQuery(String query) { + byte[] sql = query.getBytes(StandardCharsets.UTF_8); + byte[] msg = new byte[1 + 4 + sql.length + 1]; + msg[0] = 'Q'; + int len = 4 + sql.length + 1; + msg[1] = (byte) (len >> 24); + msg[2] = (byte) (len >> 16); + msg[3] = (byte) (len >> 8); + msg[4] = (byte) len; + System.arraycopy(sql, 0, msg, 5, sql.length); + return msg; + } + private static byte[] concat(List replies) { int totalLen = 0; for (byte[] r : replies) { @@ -238,8 +325,43 @@ private static byte[] concat(List replies) { return result; } + private void restoreDataDir(Path source) throws IOException { + Path pgdataRoot = fs.getPath(PG_DATA); + try (ZipInputStream zis = new ZipInputStream(java.nio.file.Files.newInputStream(source))) { + ZipEntry entry; + while ((entry = zis.getNextEntry()) != null) { + if (entry.isDirectory()) { + continue; + } + Path target = pgdataRoot.resolve(entry.getName()); + Files.createDirectories(target.getParent()); + // Overwrite classpath defaults with saved state. + Files.deleteIfExists(target); + Files.copy(zis, target); + } + } + } + // === Resource extraction === private static void extractDistToZeroFs(FileSystem fs) throws IOException { + // Create all pgdata directories first (including empty ones that + // PostgreSQL expects, e.g. pg_logical/snapshots). 
+ InputStream dirManifest = PGLite.class.getResourceAsStream("/pglite-dirs.txt"); + if (dirManifest != null) { + try (BufferedReader dr = + new BufferedReader( + new InputStreamReader(dirManifest, StandardCharsets.UTF_8))) { + String line; + while ((line = dr.readLine()) != null) { + line = line.trim(); + if (line.isEmpty()) { + continue; + } + Files.createDirectories(fs.getPath("/" + line)); + } + } + } + InputStream manifest = PGLite.class.getResourceAsStream("/pglite-files.txt"); if (manifest == null) { throw new RuntimeException( @@ -267,10 +389,18 @@ private static void extractDistToZeroFs(FileSystem fs) throws IOException { } public static final class Builder { + private Path dataDir; + private Builder() {} + /** Set a host filesystem path for pgdata backup/restore (zip file). */ + public Builder withDataDir(Path dataDir) { + this.dataDir = dataDir; + return this; + } + public PGLite build() { - return new PGLite(); + return new PGLite(dataDir); } } } diff --git a/core/src/test/java/io/roastedroot/pglite4j/core/PGLiteTest.java b/core/src/test/java/io/roastedroot/pglite4j/core/PGLiteTest.java index 93c2727..4d34401 100644 --- a/core/src/test/java/io/roastedroot/pglite4j/core/PGLiteTest.java +++ b/core/src/test/java/io/roastedroot/pglite4j/core/PGLiteTest.java @@ -3,6 +3,8 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.nio.file.Files; +import java.nio.file.Path; import org.junit.jupiter.api.Test; public class PGLiteTest { @@ -134,6 +136,84 @@ public void extendedProtocolErrorRecovery() { } } + @Test + public void dumpCreatesValidZip() throws Exception { + Path backupFile = Files.createTempFile("pglite-backup-", ".zip"); + Files.delete(backupFile); + + try { + try (PGLite pg = PGLite.builder().build()) { + doHandshake(pg); + + pg.execProtocolRaw( + PgWireCodec.queryMessage("CREATE TABLE persist_test (id INT, val TEXT);")); + pg.execProtocolRaw( + PgWireCodec.queryMessage( + 
"INSERT INTO persist_test VALUES (1, 'survived');"));
+
+                pg.dumpDataDir(backupFile);
+                assertTrue(Files.exists(backupFile), "Backup file should exist after dump");
+                assertTrue(Files.size(backupFile) > 0, "Backup file should not be empty");
+
+                // Verify the zip contains pgdata files.
+                java.util.Set<String> entries = new java.util.HashSet<>();
+                try (java.util.zip.ZipInputStream zis =
+                        new java.util.zip.ZipInputStream(Files.newInputStream(backupFile))) {
+                    java.util.zip.ZipEntry e;
+                    while ((e = zis.getNextEntry()) != null) {
+                        entries.add(e.getName());
+                    }
+                }
+                assertTrue(entries.contains("PG_VERSION"), "Zip should contain PG_VERSION");
+                assertTrue(
+                        entries.stream().anyMatch(n -> n.startsWith("base/")),
+                        "Zip should contain base/ directory entries");
+            }
+        } finally {
+            Files.deleteIfExists(backupFile);
+        }
+    }
+
+    @Test
+    public void dataDirectoryBackupRestore() throws Exception {
+        Path backupFile = Files.createTempFile("pglite-backup-", ".zip");
+        Files.delete(backupFile); // start without an existing backup
+
+        try {
+            // Session 1: create data and dump.
+            try (PGLite pg = PGLite.builder().withDataDir(backupFile).build()) {
+                doHandshake(pg);
+
+                pg.execProtocolRaw(
+                        PgWireCodec.queryMessage("CREATE TABLE persist_test (id INT, val TEXT);"));
+                pg.execProtocolRaw(
+                        PgWireCodec.queryMessage(
+                                "INSERT INTO persist_test VALUES (1, 'survived');"));
+
+                pg.dumpDataDir(backupFile);
+                assertTrue(Files.exists(backupFile), "Backup file should exist after dump");
+                assertTrue(Files.size(backupFile) > 0, "Backup file should not be empty");
+            }
+
+            // Session 2: restore from the dump and verify data survived.
+ try (PGLite pg = PGLite.builder().withDataDir(backupFile).build()) { + doHandshake(pg); + + byte[] r = + pg.execProtocolRaw( + PgWireCodec.queryMessage( + "SELECT val FROM persist_test WHERE id = 1;")); + String data = PgWireCodec.parseDataRows(r); + assertTrue( + data.contains("survived"), + "Data should survive restart via backup/restore, got: " + data); + } + } finally { + Files.deleteIfExists(backupFile); + Files.deleteIfExists(backupFile.resolveSibling(backupFile.getFileName() + ".tmp")); + } + } + static void doHandshake(PGLite pg) { byte[] startup = PgWireCodec.startupMessage("postgres", "template1"); byte[] resp1 = pg.execProtocolRaw(startup); diff --git a/it/src/it/quarkus-pet-clinic/src/main/resources/application.properties b/it/src/it/quarkus-pet-clinic/src/main/resources/application.properties index 6b8c939..0b00d9d 100644 --- a/it/src/it/quarkus-pet-clinic/src/main/resources/application.properties +++ b/it/src/it/quarkus-pet-clinic/src/main/resources/application.properties @@ -1,14 +1,8 @@ quarkus.datasource.db-kind=postgresql quarkus.datasource.jdbc.url=jdbc:pglite:memory:// quarkus.datasource.jdbc.driver=io.roastedroot.pglite4j.jdbc.PgLiteDriver -quarkus.datasource.username=postgres -quarkus.datasource.password=password -quarkus.datasource.jdbc.min-size=1 quarkus.datasource.jdbc.max-size=1 quarkus.hibernate-orm.database.generation=drop-and-create quarkus.devservices.enabled=false - -quarkus.hibernate-orm.dialect=org.hibernate.dialect.PostgreSQLDialect -quarkus.hibernate-orm.unsupported-properties."hibernate.boot.allow_jdbc_metadata_access"=false \ No newline at end of file diff --git a/it/src/it/quarkus-todo-app/invoker.properties b/it/src/it/quarkus-todo-app/invoker.properties new file mode 100644 index 0000000..84099bc --- /dev/null +++ b/it/src/it/quarkus-todo-app/invoker.properties @@ -0,0 +1 @@ +invoker.goals=test diff --git a/it/src/it/quarkus-todo-app/pom.xml b/it/src/it/quarkus-todo-app/pom.xml new file mode 100644 index 0000000..e11a52a 
--- /dev/null +++ b/it/src/it/quarkus-todo-app/pom.xml @@ -0,0 +1,89 @@ + + + 4.0.0 + + org.acme + quarkus-todo-app-it + 0.0-SNAPSHOT + jar + + + 3.32.1 + 17 + true + 3.5.2 + UTF-8 + + + + + + io.quarkus.platform + quarkus-bom + ${quarkus.platform.version} + pom + import + + + + + + + io.quarkus + quarkus-hibernate-orm-panache + + + io.quarkus + quarkus-jdbc-postgresql + + + io.quarkus + quarkus-rest-jackson + + + io.roastedroot + pglite4j-jdbc + @project.version@ + + + + io.quarkus + quarkus-junit5 + test + + + io.rest-assured + rest-assured + test + + + + + + + io.quarkus.platform + quarkus-maven-plugin + ${quarkus.platform.version} + true + + + + build + generate-code + generate-code-tests + + + + + + maven-surefire-plugin + ${surefire-plugin.version} + + + org.jboss.logmanager.LogManager + + + + + + diff --git a/it/src/it/quarkus-todo-app/seed-data.zip b/it/src/it/quarkus-todo-app/seed-data.zip new file mode 100644 index 0000000..c548514 Binary files /dev/null and b/it/src/it/quarkus-todo-app/seed-data.zip differ diff --git a/it/src/it/quarkus-todo-app/src/main/java/org/acme/Todo.java b/it/src/it/quarkus-todo-app/src/main/java/org/acme/Todo.java new file mode 100644 index 0000000..75f72ce --- /dev/null +++ b/it/src/it/quarkus-todo-app/src/main/java/org/acme/Todo.java @@ -0,0 +1,15 @@ +package org.acme; + +import io.quarkus.hibernate.orm.panache.PanacheEntity; +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Table; + +@Entity +@Table(name = "todos") +public class Todo extends PanacheEntity { + @Column(nullable = false) + public String title; + + public boolean completed; +} diff --git a/it/src/it/quarkus-todo-app/src/main/java/org/acme/TodoResource.java b/it/src/it/quarkus-todo-app/src/main/java/org/acme/TodoResource.java new file mode 100644 index 0000000..eedc6a3 --- /dev/null +++ b/it/src/it/quarkus-todo-app/src/main/java/org/acme/TodoResource.java @@ -0,0 +1,68 @@ +package org.acme; + +import 
io.quarkus.panache.common.Sort; +import jakarta.transaction.Transactional; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.DELETE; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.POST; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.WebApplicationException; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; +import java.util.List; + +@Path("/todos") +@Produces(MediaType.APPLICATION_JSON) +@Consumes(MediaType.APPLICATION_JSON) +public class TodoResource { + + @GET + public List list() { + return Todo.listAll(Sort.ascending("id")); + } + + @GET + @Path("/{id}") + public Todo get(@PathParam("id") Long id) { + Todo todo = Todo.findById(id); + if (todo == null) { + throw new WebApplicationException(404); + } + return todo; + } + + @POST + @Transactional + public Response create(Todo todo) { + todo.persist(); + return Response.status(Response.Status.CREATED).entity(todo).build(); + } + + @PUT + @Path("/{id}") + @Transactional + public Todo update(@PathParam("id") Long id, Todo todo) { + Todo existing = Todo.findById(id); + if (existing == null) { + throw new WebApplicationException(404); + } + existing.title = todo.title; + existing.completed = todo.completed; + return existing; + } + + @DELETE + @Path("/{id}") + @Transactional + public Response delete(@PathParam("id") Long id) { + boolean deleted = Todo.deleteById(id); + if (!deleted) { + throw new WebApplicationException(404); + } + return Response.noContent().build(); + } +} diff --git a/it/src/it/quarkus-todo-app/src/main/resources/application.properties b/it/src/it/quarkus-todo-app/src/main/resources/application.properties new file mode 100644 index 0000000..fb3434a --- /dev/null +++ b/it/src/it/quarkus-todo-app/src/main/resources/application.properties @@ -0,0 +1,8 @@ +quarkus.datasource.db-kind=postgresql +quarkus.datasource.jdbc.url=jdbc:pglite:seed-data.zip 
+quarkus.datasource.jdbc.driver=io.roastedroot.pglite4j.jdbc.PgLiteDriver +quarkus.datasource.jdbc.max-size=1 + +quarkus.hibernate-orm.database.generation=none + +quarkus.devservices.enabled=false diff --git a/it/src/it/quarkus-todo-app/src/test/java/org/acme/TodoResourceTest.java b/it/src/it/quarkus-todo-app/src/test/java/org/acme/TodoResourceTest.java new file mode 100644 index 0000000..08776f0 --- /dev/null +++ b/it/src/it/quarkus-todo-app/src/test/java/org/acme/TodoResourceTest.java @@ -0,0 +1,94 @@ +package org.acme; + +import static io.restassured.RestAssured.given; +import static org.hamcrest.CoreMatchers.is; + +import io.quarkus.test.junit.QuarkusTest; +import io.restassured.http.ContentType; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; + +@QuarkusTest +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class TodoResourceTest { + + @Test + @Order(1) + void preExistingDataIsPresent() { + // The database is restored from seed-data.zip which contains + // 3 todos from a previous session — no import.sql, no schema + // generation, the app boots on a non-clean database. 
+ given().when() + .get("/todos") + .then() + .statusCode(200) + .body("size()", is(3)) + .body("[0].title", is("Buy groceries")) + .body("[0].completed", is(true)) + .body("[1].title", is("Walk the dog")) + .body("[1].completed", is(false)) + .body("[2].title", is("Deploy app")) + .body("[2].completed", is(false)); + } + + @Test + @Order(2) + void addNewTodo() { + given().contentType(ContentType.JSON) + .body("{\"title\":\"Fix bug\",\"completed\":false}") + .when() + .post("/todos") + .then() + .statusCode(201) + .body("title", is("Fix bug")) + .body("completed", is(false)); + } + + @Test + @Order(3) + void completeTodo() { + // Mark "Walk the dog" as completed + int id = given().when().get("/todos").then().extract().path("[1].id"); + + given().contentType(ContentType.JSON) + .body("{\"title\":\"Walk the dog\",\"completed\":true}") + .when() + .put("/todos/" + id) + .then() + .statusCode(200) + .body("completed", is(true)); + } + + @Test + @Order(4) + void deleteTodo() { + // Delete "Deploy app" + int id = given().when().get("/todos").then().extract().path("[2].id"); + + given().when().delete("/todos/" + id).then().statusCode(204); + } + + @Test + @Order(5) + void finalState() { + given().when() + .get("/todos") + .then() + .statusCode(200) + .body("size()", is(3)) + .body("[0].title", is("Buy groceries")) + .body("[0].completed", is(true)) + .body("[1].title", is("Walk the dog")) + .body("[1].completed", is(true)) + .body("[2].title", is("Fix bug")) + .body("[2].completed", is(false)); + } + + @Test + @Order(6) + void getNonExistentReturns404() { + given().when().get("/todos/999999").then().statusCode(404); + } +} diff --git a/it/src/it/spring-boot-pet-clinic/src/main/resources/application.properties b/it/src/it/spring-boot-pet-clinic/src/main/resources/application.properties index 3c903d9..3044cd4 100644 --- a/it/src/it/spring-boot-pet-clinic/src/main/resources/application.properties +++ 
b/it/src/it/spring-boot-pet-clinic/src/main/resources/application.properties @@ -1,8 +1,5 @@ spring.datasource.url=jdbc:pglite:memory:// spring.datasource.driver-class-name=io.roastedroot.pglite4j.jdbc.PgLiteDriver -spring.datasource.username=postgres -spring.datasource.password=password spring.datasource.hikari.maximum-pool-size=1 spring.jpa.hibernate.ddl-auto=create-drop spring.jpa.database-platform=org.hibernate.dialect.PostgreSQLDialect -spring.jpa.properties.hibernate.boot.allow_jdbc_metadata_access=false diff --git a/jdbc/src/main/java/io/roastedroot/pglite4j/jdbc/PgLiteDriver.java b/jdbc/src/main/java/io/roastedroot/pglite4j/jdbc/PgLiteDriver.java index 6401a56..44628a7 100644 --- a/jdbc/src/main/java/io/roastedroot/pglite4j/jdbc/PgLiteDriver.java +++ b/jdbc/src/main/java/io/roastedroot/pglite4j/jdbc/PgLiteDriver.java @@ -7,6 +7,8 @@ import java.net.InetAddress; import java.net.ServerSocket; import java.net.Socket; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.Connection; import java.sql.Driver; import java.sql.DriverManager; @@ -19,6 +21,9 @@ import java.util.Properties; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Logger; @@ -55,12 +60,19 @@ public Connection connect(String url, Properties info) throws SQLException { } String dataPath = url.substring(URL_PREFIX.length()); + long backupIntervalSeconds = + Long.parseLong( + info != null + ? info.getProperty("pgliteBackupIntervalSeconds", "60") + : "60"); + ManagedInstance instance = INSTANCES.computeIfAbsent( dataPath, k -> { + Path dir = k.startsWith("memory:") ? 
null : Paths.get(k); ManagedInstance inst = new ManagedInstance(); - inst.boot(); + inst.boot(dir, backupIntervalSeconds); return inst; }); @@ -111,17 +123,31 @@ public Logger getParentLogger() throws SQLFeatureNotSupportedException { throw new SQLFeatureNotSupportedException(); } + static void closeAndEvict(String dataPath) { + ManagedInstance inst = INSTANCES.remove(dataPath); + if (inst != null) { + inst.close(); + } + } + static final class ManagedInstance { private PGLite pgLite; private ServerSocket serverSocket; private volatile boolean running; + private Path dataDir; + private ScheduledExecutorService backupScheduler; private final Object pgLock = new Object(); private final AtomicInteger connectionCounter = new AtomicInteger(); private final Set activeSockets = ConcurrentHashMap.newKeySet(); private volatile List cachedStartupResponses; - void boot() { - pgLite = PGLite.builder().build(); + void boot(Path dataDir, long backupIntervalSeconds) { + this.dataDir = dataDir; + PGLite.Builder b = PGLite.builder(); + if (dataDir != null) { + b.withDataDir(dataDir); + } + pgLite = b.build(); try { serverSocket = new ServerSocket(0, 50, InetAddress.getByName("127.0.0.1")); } catch (IOException e) { @@ -132,6 +158,31 @@ void boot() { Thread acceptThread = new Thread(this::acceptLoop, "pglite-accept"); acceptThread.setDaemon(true); acceptThread.start(); + + if (dataDir != null) { + backupScheduler = + Executors.newSingleThreadScheduledExecutor( + r -> { + Thread t = new Thread(r, "pglite-backup"); + t.setDaemon(true); + return t; + }); + backupScheduler.scheduleAtFixedRate( + this::scheduledBackup, + backupIntervalSeconds, + backupIntervalSeconds, + TimeUnit.SECONDS); + } + } + + private void scheduledBackup() { + try { + synchronized (pgLock) { + pgLite.dumpDataDir(dataDir); + } + } catch (IOException e) { + // best-effort periodic backup + } } int getPort() { @@ -256,6 +307,9 @@ private static boolean endsWithReadyForQuery(byte[] response) { void close() { running 
= false; + if (backupScheduler != null) { + backupScheduler.shutdownNow(); + } try { serverSocket.close(); } catch (IOException e) { @@ -268,6 +322,18 @@ void close() { // cleanup } } + // Final dump before destroying the WASM instance. + // PGLite.close() also dumps, but doing it here under pgLock + // prevents races with any in-flight connection handlers. + if (dataDir != null) { + try { + synchronized (pgLock) { + pgLite.dumpDataDir(dataDir); + } + } catch (IOException e) { + // best-effort + } + } pgLite.close(); } } diff --git a/jdbc/src/test/java/io/roastedroot/pglite4j/jdbc/PgLiteDriverTest.java b/jdbc/src/test/java/io/roastedroot/pglite4j/jdbc/PgLiteDriverTest.java index c5006b3..3235066 100644 --- a/jdbc/src/test/java/io/roastedroot/pglite4j/jdbc/PgLiteDriverTest.java +++ b/jdbc/src/test/java/io/roastedroot/pglite4j/jdbc/PgLiteDriverTest.java @@ -4,6 +4,8 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.nio.file.Files; +import java.nio.file.Path; import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; @@ -201,6 +203,46 @@ void multipleConnectionsSameDatabase() throws SQLException { @Test @Order(12) + void persistentStorageBackupRestore() throws Exception { + Path backupFile = Files.createTempFile("pglite-jdbc-backup-", ".zip"); + Files.delete(backupFile); + String url = "jdbc:pglite:" + backupFile.toAbsolutePath(); + + try { + // Session 1: create schema and data, then close the instance. + try (Connection conn = DriverManager.getConnection(url)) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("CREATE TABLE persist (id SERIAL PRIMARY KEY, val TEXT)"); + stmt.executeUpdate("INSERT INTO persist (val) VALUES ('survived')"); + stmt.executeUpdate("INSERT INTO persist (val) VALUES ('restart')"); + } + } + // Closing the JDBC connection does not close the ManagedInstance. 
+ // Evict it explicitly so the next connect creates a fresh instance + // that restores from the backup zip. + PgLiteDriver.closeAndEvict(backupFile.toAbsolutePath().toString()); + assertTrue(Files.exists(backupFile), "Backup file should exist after evict"); + + // Session 2: reconnect — the driver boots a new PGLite from the backup. + try (Connection conn = DriverManager.getConnection(url)) { + try (Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT val FROM persist ORDER BY id")) { + assertTrue(rs.next()); + assertEquals("survived", rs.getString("val")); + assertTrue(rs.next()); + assertEquals("restart", rs.getString("val")); + assertFalse(rs.next()); + } + } + PgLiteDriver.closeAndEvict(backupFile.toAbsolutePath().toString()); + } finally { + Files.deleteIfExists(backupFile); + Files.deleteIfExists(backupFile.resolveSibling(backupFile.getFileName() + ".tmp")); + } + } + + @Test + @Order(13) void connectionCloseDoesNotAffectOther() throws SQLException { String url = "jdbc:pglite:memory:closetest"; Connection conn1 = DriverManager.getConnection(url); diff --git a/wasm-build/Makefile b/wasm-build/Makefile index da202ad..7e46525 100644 --- a/wasm-build/Makefile +++ b/wasm-build/Makefile @@ -17,6 +17,7 @@ clean: rm -f $(WASM_FILE) rm -rf $(RESOURCES_DIR)/pgdata rm -f $(RESOURCES_DIR)/pglite-files.txt + rm -f $(RESOURCES_DIR)/pglite-dirs.txt rm -f $(RESOURCES_DIR)/pglite.wasi unpack: $(ARCHIVE) @@ -31,8 +32,9 @@ unpack: $(ARCHIVE) @echo "=== Copying pgdata to core/src/main/resources/ ===" mkdir -p $(RESOURCES_DIR) cp -r $(OUTPUT_DIR)/tmp/pgdata $(RESOURCES_DIR)/pgdata - @echo "=== Generating pglite-files.txt manifest ===" + @echo "=== Generating pglite-files.txt and pglite-dirs.txt manifests ===" cd $(RESOURCES_DIR) && find pgdata -type f | sort > $(RESOURCES_DIR)/pglite-files.txt + cd $(RESOURCES_DIR) && find pgdata -type d | sort > $(RESOURCES_DIR)/pglite-dirs.txt rm -rf $(OUTPUT_DIR)/tmp @echo "=== Done ===" ls -lh $(WASM_FILE) 
diff --git a/wasm-build/patches/pglite-wasm/pgl_mains.c.diff b/wasm-build/patches/pglite-wasm/pgl_mains.c.diff
index e634e2c..756e912 100644
--- a/wasm-build/patches/pglite-wasm/pgl_mains.c.diff
+++ b/wasm-build/patches/pglite-wasm/pgl_mains.c.diff
@@ -1,6 +1,32 @@
 --- a/pgl_mains.c
 +++ b/pgl_mains.c
-@@ -335,6 +335,8 @@
+@@ -22,6 +22,25 @@
+     return 0;
+ }
+
++#include <string.h>
++#include "storage/buf_internals.h"
++#include "utils/inval.h"
++
++__attribute__((export_name("pgl_invalidate_buffers")))
++void
++pgl_invalidate_buffers() {
++    int i;
++    /* Clear shared buffer pool so stale pages are re-read from disk. */
++    for (i = 0; i < NBuffers; i++) {
++        BufferDesc *bufHdr = GetBufferDescriptor(i);
++        pg_atomic_write_u32(&bufHdr->state, 0);
++        memset(&bufHdr->tag, 0, sizeof(bufHdr->tag));
++    }
++    /* Flush per-backend catalog/relation caches so they are rebuilt
++     * from the restored pg_class, pg_attribute, etc. pages. */
++    InvalidateSystemCaches();
++}
++
+ #if FIXME
+ extern bool startswith(const char *str, const char *prefix);
+ #endif
+@@ -335,6 +354,8 @@
 
  	CreateSharedMemoryAndSemaphores();
 
@@ -9,7 +35,7 @@
 
  	/*
  	 * Remember stand-alone backend startup time,roughly at the same point
  	 * during startup that postmaster does so.
-@@ -348,6 +350,7 @@
+@@ -348,6 +369,7 @@
  	InitProcess();
  // main
@@ -17,7 +43,7 @@
  	SetProcessingMode(InitProcessing);
 
  	/* Early initialization */
-@@ -355,6 +358,7 @@
+@@ -355,6 +377,7 @@
  async_db_change:;
  	PDEBUG("# 167");