From d84b7a7f9ee56d9bd5674a2c4583f20873f0c695 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Wed, 25 Mar 2026 22:09:07 -0700 Subject: [PATCH 01/38] Add first-pass TPC-C module and Rust benchmark runner --- Cargo.lock | 28 + Cargo.toml | 2 + modules/tpcc/Cargo.toml | 15 + modules/tpcc/src/lib.rs | 1202 +++++++++++++++++ tools/tpcc-runner/Cargo.toml | 27 + tools/tpcc-runner/README.md | 95 ++ tools/tpcc-runner/src/client.rs | 332 +++++ tools/tpcc-runner/src/config.rs | 306 +++++ tools/tpcc-runner/src/coordinator.rs | 125 ++ tools/tpcc-runner/src/driver.rs | 619 +++++++++ tools/tpcc-runner/src/loader.rs | 307 +++++ tools/tpcc-runner/src/main.rs | 27 + .../module_bindings/customer_selector_type.rs | 17 + .../src/module_bindings/customer_type.rs | 112 ++ .../delivery_completion_type.rs | 79 ++ .../delivery_completion_view_type.rs | 25 + .../src/module_bindings/delivery_job_type.rs | 84 ++ .../delivery_progress_procedure.rs | 54 + .../module_bindings/delivery_progress_type.rs | 17 + .../delivery_queue_ack_type.rs | 18 + .../src/module_bindings/district_type.rs | 82 ++ .../fetch_delivery_completions_procedure.rs | 68 + .../src/module_bindings/history_type.rs | 73 + .../src/module_bindings/item_type.rs | 61 + .../module_bindings/load_customers_reducer.rs | 68 + .../module_bindings/load_districts_reducer.rs | 68 + .../module_bindings/load_history_reducer.rs | 68 + .../src/module_bindings/load_items_reducer.rs | 67 + .../load_new_orders_reducer.rs | 68 + .../load_order_lines_reducer.rs | 68 + .../module_bindings/load_orders_reducer.rs | 67 + .../module_bindings/load_stocks_reducer.rs | 67 + .../load_warehouses_reducer.rs | 68 + tools/tpcc-runner/src/module_bindings/mod.rs | 889 ++++++++++++ .../new_order_line_input_type.rs | 17 + .../new_order_line_result_type.rs | 22 + .../module_bindings/new_order_procedure.rs | 69 + .../module_bindings/new_order_result_type.rs | 26 + .../src/module_bindings/new_order_type.rs | 58 + .../src/module_bindings/o_order_type.rs | 73 + 
.../src/module_bindings/order_line_type.rs | 79 ++ .../order_status_line_result_type.rs | 19 + .../module_bindings/order_status_procedure.rs | 61 + .../order_status_result_type.rs | 25 + .../src/module_bindings/payment_procedure.rs | 85 ++ .../module_bindings/payment_result_type.rs | 25 + .../queue_delivery_procedure.rs | 84 ++ .../src/module_bindings/reset_tpcc_reducer.rs | 61 + .../module_bindings/stock_level_procedure.rs | 60 + .../stock_level_result_type.rs | 18 + .../src/module_bindings/stock_type.rs | 100 ++ .../src/module_bindings/warehouse_type.rs | 73 + tools/tpcc-runner/src/protocol.rs | 36 + tools/tpcc-runner/src/summary.rs | 585 ++++++++ tools/tpcc-runner/src/tpcc.rs | 192 +++ 55 files changed, 7041 insertions(+) create mode 100644 modules/tpcc/Cargo.toml create mode 100644 modules/tpcc/src/lib.rs create mode 100644 tools/tpcc-runner/Cargo.toml create mode 100644 tools/tpcc-runner/README.md create mode 100644 tools/tpcc-runner/src/client.rs create mode 100644 tools/tpcc-runner/src/config.rs create mode 100644 tools/tpcc-runner/src/coordinator.rs create mode 100644 tools/tpcc-runner/src/driver.rs create mode 100644 tools/tpcc-runner/src/loader.rs create mode 100644 tools/tpcc-runner/src/main.rs create mode 100644 tools/tpcc-runner/src/module_bindings/customer_selector_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/customer_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/delivery_completion_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/delivery_completion_view_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/delivery_job_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/delivery_progress_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/delivery_queue_ack_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/district_type.rs create mode 100644 
tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/history_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/item_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_history_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_items_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/mod.rs create mode 100644 tools/tpcc-runner/src/module_bindings/new_order_line_input_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/new_order_line_result_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/new_order_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/new_order_result_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/new_order_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/o_order_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/order_line_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/order_status_line_result_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/order_status_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/order_status_result_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/payment_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/payment_result_type.rs 
create mode 100644 tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/stock_level_result_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/stock_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/warehouse_type.rs create mode 100644 tools/tpcc-runner/src/protocol.rs create mode 100644 tools/tpcc-runner/src/summary.rs create mode 100644 tools/tpcc-runner/src/tpcc.rs diff --git a/Cargo.lock b/Cargo.lock index 03618187864..0fa10cea95d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9787,6 +9787,34 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +[[package]] +name = "tpcc-module" +version = "0.1.0" +dependencies = [ + "anyhow", + "log", + "spacetimedb 2.1.0", +] + +[[package]] +name = "tpcc-runner" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "clap 4.5.50", + "env_logger 0.10.2", + "log", + "parking_lot 0.12.5", + "rand 0.9.2", + "reqwest 0.12.24", + "serde", + "serde_json", + "spacetimedb-sdk", + "tokio", + "toml 0.8.23", +] + [[package]] name = "tracing" version = "0.1.41" diff --git a/Cargo.toml b/Cargo.toml index 49c1fa355da..7228dba44c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ members = [ "modules/sdk-test-view", "modules/sdk-test-view-pk", "modules/sdk-test-event-table", + "modules/tpcc", "sdks/rust/tests/test-client", "sdks/rust/tests/test-counter", "sdks/rust/tests/connect_disconnect_client", @@ -62,6 +63,7 @@ members = [ "tools/upgrade-version", "tools/license-check", "tools/replace-spacetimedb", + "tools/tpcc-runner", "tools/generate-client-api", "tools/gen-bindings", "tools/xtask-llm-benchmark", diff --git a/modules/tpcc/Cargo.toml b/modules/tpcc/Cargo.toml 
new file mode 100644 index 00000000000..e21da7f3a8a --- /dev/null +++ b/modules/tpcc/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "tpcc-module" +version = "0.1.0" +edition.workspace = true + +[lib] +crate-type = ["cdylib"] + +[dependencies] +anyhow.workspace = true +log.workspace = true +spacetimedb = { workspace = true, features = ["unstable"] } + +[lints] +workspace = true diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs new file mode 100644 index 00000000000..ec3c7737663 --- /dev/null +++ b/modules/tpcc/src/lib.rs @@ -0,0 +1,1202 @@ +use spacetimedb::{ + procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, +}; +use std::collections::BTreeSet; + +const DISTRICTS_PER_WAREHOUSE: u8 = 10; +const CUSTOMERS_PER_DISTRICT: u32 = 3_000; +const ITEMS: u32 = 100_000; +const MAX_C_DATA_LEN: usize = 500; +const TAX_SCALE: i64 = 10_000; + +macro_rules! ensure { + ($cond:expr, $($arg:tt)+) => { + if !($cond) { + return Err(format!($($arg)+)); + } + }; +} + +#[derive(Clone, Debug, SpacetimeType)] +pub enum CustomerSelector { + ById(u32), + ByLastName(String), +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderLineInput { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderLineResult { + pub item_id: u32, + pub item_name: String, + pub supply_w_id: u16, + pub quantity: u32, + pub stock_quantity: i32, + pub item_price_cents: i64, + pub amount_cents: i64, + pub brand_generic: String, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderResult { + pub warehouse_tax_bps: i32, + pub district_tax_bps: i32, + pub customer_discount_bps: i32, + pub customer_last: String, + pub customer_credit: String, + pub order_id: u32, + pub entry_d: Timestamp, + pub total_amount_cents: i64, + pub all_local: bool, + pub lines: Vec, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct PaymentResult { + pub warehouse_name: 
String, + pub district_name: String, + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub customer_credit: String, + pub customer_discount_bps: i32, + pub payment_amount_cents: i64, + pub customer_data: Option, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct OrderStatusLineResult { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, + pub amount_cents: i64, + pub delivery_d: Option, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct OrderStatusResult { + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub order_id: Option, + pub order_entry_d: Option, + pub carrier_id: Option, + pub lines: Vec, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct StockLevelResult { + pub warehouse_id: u16, + pub district_id: u8, + pub threshold: i32, + pub low_stock_count: u32, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct DeliveryQueueAck { + pub scheduled_id: u64, + pub queued_at: Timestamp, + pub warehouse_id: u16, + pub carrier_id: u8, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct DeliveryProgress { + pub run_id: String, + pub pending_jobs: u64, + pub completed_jobs: u64, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct DeliveryCompletionView { + pub completion_id: u64, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub warehouse_id: u16, + pub carrier_id: u8, + pub queued_at: Timestamp, + pub completed_at: Timestamp, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +#[table(accessor = warehouse)] +#[derive(Clone, Debug)] +pub struct Warehouse { + #[primary_key] + pub w_id: u16, + pub w_name: String, + pub w_street_1: String, + pub w_street_2: String, + pub w_city: String, + pub w_state: String, + pub w_zip: String, + pub w_tax_bps: i32, + pub 
w_ytd_cents: i64, +} + +#[table( + accessor = district, + index(accessor = by_w_d, btree(columns = [d_w_id, d_id])) +)] +#[derive(Clone, Debug)] +pub struct District { + #[primary_key] + pub district_key: u32, + pub d_w_id: u16, + pub d_id: u8, + pub d_name: String, + pub d_street_1: String, + pub d_street_2: String, + pub d_city: String, + pub d_state: String, + pub d_zip: String, + pub d_tax_bps: i32, + pub d_ytd_cents: i64, + pub d_next_o_id: u32, +} + +#[table( + accessor = customer, + index(accessor = by_w_d_c_id, btree(columns = [c_w_id, c_d_id, c_id])), + index(accessor = by_w_d_last_first_id, btree(columns = [c_w_id, c_d_id, c_last, c_first, c_id])) +)] +#[derive(Clone, Debug)] +pub struct Customer { + #[primary_key] + pub customer_key: u64, + pub c_w_id: u16, + pub c_d_id: u8, + pub c_id: u32, + pub c_first: String, + pub c_middle: String, + pub c_last: String, + pub c_street_1: String, + pub c_street_2: String, + pub c_city: String, + pub c_state: String, + pub c_zip: String, + pub c_phone: String, + pub c_since: Timestamp, + pub c_credit: String, + pub c_credit_lim_cents: i64, + pub c_discount_bps: i32, + pub c_balance_cents: i64, + pub c_ytd_payment_cents: i64, + pub c_payment_cnt: u32, + pub c_delivery_cnt: u32, + pub c_data: String, +} + +#[table(accessor = history)] +#[derive(Clone, Debug)] +pub struct History { + #[primary_key] + #[auto_inc] + pub history_id: u64, + pub h_c_id: u32, + pub h_c_d_id: u8, + pub h_c_w_id: u16, + pub h_d_id: u8, + pub h_w_id: u16, + pub h_date: Timestamp, + pub h_amount_cents: i64, + pub h_data: String, +} + +#[table(accessor = item)] +#[derive(Clone, Debug)] +pub struct Item { + #[primary_key] + pub i_id: u32, + pub i_im_id: u32, + pub i_name: String, + pub i_price_cents: i64, + pub i_data: String, +} + +#[table( + accessor = stock, + index(accessor = by_w_i, btree(columns = [s_w_id, s_i_id])) +)] +#[derive(Clone, Debug)] +pub struct Stock { + #[primary_key] + pub stock_key: u64, + pub s_w_id: u16, + pub s_i_id: u32, + 
pub s_quantity: i32, + pub s_dist_01: String, + pub s_dist_02: String, + pub s_dist_03: String, + pub s_dist_04: String, + pub s_dist_05: String, + pub s_dist_06: String, + pub s_dist_07: String, + pub s_dist_08: String, + pub s_dist_09: String, + pub s_dist_10: String, + pub s_ytd: u64, + pub s_order_cnt: u32, + pub s_remote_cnt: u32, + pub s_data: String, +} + +#[table( + accessor = oorder, + index(accessor = by_w_d_o_id, btree(columns = [o_w_id, o_d_id, o_id])), + index(accessor = by_w_d_c_o_id, btree(columns = [o_w_id, o_d_id, o_c_id, o_id])) +)] +#[derive(Clone, Debug)] +pub struct OOrder { + #[primary_key] + pub order_key: u64, + pub o_w_id: u16, + pub o_d_id: u8, + pub o_id: u32, + pub o_c_id: u32, + pub o_entry_d: Timestamp, + pub o_carrier_id: Option, + pub o_ol_cnt: u8, + pub o_all_local: bool, +} + +#[table( + accessor = new_order_row, + index(accessor = by_w_d_o_id, btree(columns = [no_w_id, no_d_id, no_o_id])) +)] +#[derive(Clone, Debug)] +pub struct NewOrder { + #[primary_key] + pub new_order_key: u64, + pub no_w_id: u16, + pub no_d_id: u8, + pub no_o_id: u32, +} + +#[table( + accessor = order_line, + index(accessor = by_w_d_o_number, btree(columns = [ol_w_id, ol_d_id, ol_o_id, ol_number])) +)] +#[derive(Clone, Debug)] +pub struct OrderLine { + #[primary_key] + pub order_line_key: u64, + pub ol_w_id: u16, + pub ol_d_id: u8, + pub ol_o_id: u32, + pub ol_number: u8, + pub ol_i_id: u32, + pub ol_supply_w_id: u16, + pub ol_delivery_d: Option, + pub ol_quantity: u32, + pub ol_amount_cents: i64, + pub ol_dist_info: String, +} + +#[table( + accessor = delivery_job, + scheduled(run_delivery_job), + index(accessor = by_run_id, btree(columns = [run_id])) +)] +#[derive(Clone, Debug)] +pub struct DeliveryJob { + #[primary_key] + #[auto_inc] + pub scheduled_id: u64, + pub scheduled_at: ScheduleAt, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub queued_at: Timestamp, + pub w_id: u16, + pub carrier_id: u8, + pub 
next_d_id: u8, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +#[table( + accessor = delivery_completion, + index(accessor = by_run_completion, btree(columns = [run_id, completion_id])) +)] +#[derive(Clone, Debug)] +pub struct DeliveryCompletion { + #[primary_key] + #[auto_inc] + pub completion_id: u64, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub warehouse_id: u16, + pub carrier_id: u8, + pub queued_at: Timestamp, + pub completed_at: Timestamp, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +struct PaymentRequest<'a> { + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer_selector: &'a CustomerSelector, + payment_amount_cents: i64, + now: Timestamp, +} + +#[reducer] +pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { + for row in ctx.db.delivery_job().iter() { + ctx.db.delivery_job().delete(row); + } + for row in ctx.db.delivery_completion().iter() { + ctx.db.delivery_completion().delete(row); + } + for row in ctx.db.order_line().iter() { + ctx.db.order_line().delete(row); + } + for row in ctx.db.new_order_row().iter() { + ctx.db.new_order_row().delete(row); + } + for row in ctx.db.oorder().iter() { + ctx.db.oorder().delete(row); + } + for row in ctx.db.history().iter() { + ctx.db.history().delete(row); + } + for row in ctx.db.customer().iter() { + ctx.db.customer().delete(row); + } + for row in ctx.db.district().iter() { + ctx.db.district().delete(row); + } + for row in ctx.db.stock().iter() { + ctx.db.stock().delete(row); + } + for row in ctx.db.item().iter() { + ctx.db.item().delete(row); + } + for row in ctx.db.warehouse().iter() { + ctx.db.warehouse().delete(row); + } + Ok(()) +} + +#[reducer] +pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + validate_warehouse_row(&row)?; + ctx.db.warehouse().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> 
Result<(), String> { + for row in rows { + validate_district_row(&row)?; + ctx.db.district().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + validate_customer_row(&row)?; + ctx.db.customer().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for mut row in rows { + row.history_id = 0; + ctx.db.history().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + validate_item_row(&row)?; + ctx.db.item().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_stocks(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + validate_stock_row(&row)?; + ctx.db.stock().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + ctx.db.oorder().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + ctx.db.new_order_row().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + ctx.db.order_line().insert(row); + } + Ok(()) +} + +#[procedure] +pub fn new_order( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, +) -> Result { + ctx.try_with_tx(|tx| new_order_tx(tx, w_id, d_id, c_id, order_lines.clone())) +} + +#[procedure] +pub fn payment( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, +) -> Result { + let now = ctx.timestamp; + ctx.try_with_tx(|tx| { + payment_tx( + tx, + PaymentRequest { + w_id, + d_id, + c_w_id, + c_d_id, + customer_selector: &customer, + payment_amount_cents, + now, + }, + ) + }) +} + 
+#[procedure] +pub fn order_status( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + customer: CustomerSelector, +) -> Result { + ctx.try_with_tx(|tx| order_status_tx(tx, w_id, d_id, &customer)) +} + +#[procedure] +pub fn stock_level( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + threshold: i32, +) -> Result { + ctx.try_with_tx(|tx| stock_level_tx(tx, w_id, d_id, threshold)) +} + +#[procedure] +pub fn queue_delivery( + ctx: &mut ProcedureContext, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, +) -> Result { + let queued_at = ctx.timestamp; + ctx.try_with_tx(|tx| { + ensure_warehouse_exists(tx, w_id)?; + ensure!((1..=10).contains(&carrier_id), "carrier_id must be in the range 1..=10"); + + let job = tx.db.delivery_job().insert(DeliveryJob { + scheduled_id: 0, + scheduled_at: queued_at.into(), + run_id: run_id.clone(), + driver_id: driver_id.clone(), + terminal_id, + request_id, + queued_at, + w_id, + carrier_id, + next_d_id: 1, + skipped_districts: 0, + processed_districts: 0, + }); + + Ok(DeliveryQueueAck { + scheduled_id: job.scheduled_id, + queued_at, + warehouse_id: w_id, + carrier_id, + }) + }) +} + +#[procedure] +pub fn delivery_progress(ctx: &mut ProcedureContext, run_id: String) -> Result { + ctx.try_with_tx(|tx| { + let pending_jobs = tx.db.delivery_job().by_run_id().filter(&run_id).count() as u64; + let completed_jobs = tx + .db + .delivery_completion() + .by_run_completion() + .filter((&run_id, 0u64..)) + .count() as u64; + Ok(DeliveryProgress { + run_id: run_id.clone(), + pending_jobs, + completed_jobs, + }) + }) +} + +#[procedure] +pub fn fetch_delivery_completions( + ctx: &mut ProcedureContext, + run_id: String, + after_completion_id: u64, + limit: u32, +) -> Result, String> { + ctx.try_with_tx(|tx| { + let limit = limit as usize; + let rows = tx + .db + .delivery_completion() + .by_run_completion() + .filter((&run_id, after_completion_id.saturating_add(1)..)) + 
.take(limit) + .map(as_delivery_completion_view) + .collect(); + Ok(rows) + }) +} + +#[reducer] +pub fn run_delivery_job(ctx: &ReducerContext, job: DeliveryJob) -> Result<(), String> { + let mut next_job = job.clone(); + + let had_order = process_delivery_district(ctx, job.w_id, job.next_d_id, job.carrier_id, ctx.timestamp)?; + next_job.processed_districts = next_job.processed_districts.saturating_add(1); + if !had_order { + next_job.skipped_districts = next_job.skipped_districts.saturating_add(1); + } + + let jobs = ctx.db.delivery_job(); + jobs.scheduled_id().delete(job.scheduled_id); + + if job.next_d_id >= DISTRICTS_PER_WAREHOUSE { + ctx.db.delivery_completion().insert(DeliveryCompletion { + completion_id: 0, + run_id: job.run_id, + driver_id: job.driver_id, + terminal_id: job.terminal_id, + request_id: job.request_id, + warehouse_id: job.w_id, + carrier_id: job.carrier_id, + queued_at: job.queued_at, + completed_at: ctx.timestamp, + skipped_districts: next_job.skipped_districts, + processed_districts: next_job.processed_districts, + }); + } else { + next_job.next_d_id += 1; + next_job.scheduled_at = ctx.timestamp.into(); + ctx.db.delivery_job().insert(next_job); + } + + Ok(()) +} + +fn validate_warehouse_row(row: &Warehouse) -> Result<(), String> { + ensure!( + (1..=i32::from(u16::MAX)).contains(&(row.w_id as i32)), + "warehouse id must be positive" + ); + Ok(()) +} + +fn validate_district_row(row: &District) -> Result<(), String> { + ensure!( + row.district_key == pack_district_key(row.d_w_id, row.d_id), + "district row has mismatched packed key" + ); + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&row.d_id), + "district id out of range" + ); + Ok(()) +} + +fn validate_customer_row(row: &Customer) -> Result<(), String> { + ensure!( + row.customer_key == pack_customer_key(row.c_w_id, row.c_d_id, row.c_id), + "customer row has mismatched packed key" + ); + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&row.c_d_id), + "customer district id out of range" 
+ ); + ensure!( + (1..=CUSTOMERS_PER_DISTRICT).contains(&row.c_id), + "customer id out of range" + ); + Ok(()) +} + +fn validate_item_row(row: &Item) -> Result<(), String> { + ensure!((1..=ITEMS).contains(&row.i_id), "item id out of range"); + Ok(()) +} + +fn validate_stock_row(row: &Stock) -> Result<(), String> { + ensure!( + row.stock_key == pack_stock_key(row.s_w_id, row.s_i_id), + "stock row has mismatched packed key" + ); + ensure!((1..=ITEMS).contains(&row.s_i_id), "stock item id out of range"); + Ok(()) +} + +fn new_order_tx( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, +) -> Result { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), + "district id out of range" + ); + ensure!( + (5..=15).contains(&order_lines.len()), + "new-order requires between 5 and 15 order lines" + ); + + let warehouse = find_warehouse(tx, w_id)?; + let district = find_district(tx, w_id, d_id)?; + let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; + + let mut touched_items = Vec::with_capacity(order_lines.len()); + let mut all_local = true; + for line in &order_lines { + ensure!(line.quantity > 0, "order line quantity must be positive"); + let item = find_item(tx, line.item_id)?; + let stock = find_stock(tx, line.supply_w_id, line.item_id)?; + if line.supply_w_id != w_id { + all_local = false; + } + touched_items.push((line.clone(), item, stock)); + } + + let order_id = district.d_next_o_id; + + tx.db.district().district_key().update(District { + d_next_o_id: district.d_next_o_id + 1, + ..district.clone() + }); + + tx.db.oorder().insert(OOrder { + order_key: pack_order_key(w_id, d_id, order_id), + o_w_id: w_id, + o_d_id: d_id, + o_id: order_id, + o_c_id: c_id, + o_entry_d: tx.timestamp, + o_carrier_id: None, + o_ol_cnt: order_lines.len() as u8, + o_all_local: all_local, + }); + + tx.db.new_order_row().insert(NewOrder { + new_order_key: pack_order_key(w_id, d_id, order_id), + no_w_id: w_id, + no_d_id: d_id, + no_o_id: 
order_id, + }); + + let mut line_results = Vec::with_capacity(touched_items.len()); + let mut subtotal_cents = 0i64; + for (idx, (line, item, stock)) in touched_items.into_iter().enumerate() { + let updated_stock_quantity = adjust_stock_quantity(stock.s_quantity, line.quantity as i32); + tx.db.stock().stock_key().update(Stock { + s_quantity: updated_stock_quantity, + s_ytd: stock.s_ytd + u64::from(line.quantity), + s_order_cnt: stock.s_order_cnt + 1, + s_remote_cnt: stock.s_remote_cnt + u32::from(line.supply_w_id != w_id), + ..stock.clone() + }); + + let line_amount_cents = item.i_price_cents * i64::from(line.quantity); + subtotal_cents += line_amount_cents; + let dist_info = district_stock_info(&stock, d_id); + tx.db.order_line().insert(OrderLine { + order_line_key: pack_order_line_key(w_id, d_id, order_id, (idx + 1) as u8), + ol_w_id: w_id, + ol_d_id: d_id, + ol_o_id: order_id, + ol_number: (idx + 1) as u8, + ol_i_id: line.item_id, + ol_supply_w_id: line.supply_w_id, + ol_delivery_d: None, + ol_quantity: line.quantity, + ol_amount_cents: line_amount_cents, + ol_dist_info: dist_info, + }); + + let brand_generic = if contains_original(&item.i_data) && contains_original(&stock.s_data) { + "B" + } else { + "G" + }; + line_results.push(NewOrderLineResult { + item_id: item.i_id, + item_name: item.i_name, + supply_w_id: line.supply_w_id, + quantity: line.quantity, + stock_quantity: updated_stock_quantity, + item_price_cents: item.i_price_cents, + amount_cents: line_amount_cents, + brand_generic: brand_generic.to_string(), + }); + } + + let taxed = apply_tax( + subtotal_cents, + i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), + ); + let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); + + Ok(NewOrderResult { + warehouse_tax_bps: warehouse.w_tax_bps, + district_tax_bps: district.d_tax_bps, + customer_discount_bps: customer.c_discount_bps, + customer_last: customer.c_last, + customer_credit: customer.c_credit, + order_id, + 
entry_d: tx.timestamp, + total_amount_cents, + all_local, + lines: line_results, + }) +} + +fn payment_tx(tx: &spacetimedb::TxContext, req: PaymentRequest<'_>) -> Result { + ensure!(req.payment_amount_cents > 0, "payment amount must be positive"); + + let warehouse = find_warehouse(tx, req.w_id)?; + let district = find_district(tx, req.w_id, req.d_id)?; + let customer = resolve_customer(tx, req.c_w_id, req.c_d_id, req.customer_selector)?; + + tx.db.warehouse().w_id().update(Warehouse { + w_ytd_cents: warehouse.w_ytd_cents + req.payment_amount_cents, + ..warehouse.clone() + }); + + tx.db.district().district_key().update(District { + d_ytd_cents: district.d_ytd_cents + req.payment_amount_cents, + ..district.clone() + }); + + let mut updated_customer = Customer { + c_balance_cents: customer.c_balance_cents - req.payment_amount_cents, + c_ytd_payment_cents: customer.c_ytd_payment_cents + req.payment_amount_cents, + c_payment_cnt: customer.c_payment_cnt + 1, + ..customer.clone() + }; + + if updated_customer.c_credit == "BC" { + let prefix = format!( + "{} {} {} {} {} {} {}|", + updated_customer.c_id, + updated_customer.c_d_id, + updated_customer.c_w_id, + req.d_id, + req.w_id, + req.payment_amount_cents, + req.now.to_micros_since_unix_epoch() + ); + updated_customer.c_data = format!("{prefix}{}", updated_customer.c_data); + updated_customer.c_data.truncate(MAX_C_DATA_LEN); + } + + tx.db.customer().customer_key().update(updated_customer.clone()); + + tx.db.history().insert(History { + history_id: 0, + h_c_id: updated_customer.c_id, + h_c_d_id: updated_customer.c_d_id, + h_c_w_id: updated_customer.c_w_id, + h_d_id: req.d_id, + h_w_id: req.w_id, + h_date: req.now, + h_amount_cents: req.payment_amount_cents, + h_data: format!("{} {}", warehouse.w_name, district.d_name), + }); + + Ok(PaymentResult { + warehouse_name: warehouse.w_name, + district_name: district.d_name, + customer_id: updated_customer.c_id, + customer_first: updated_customer.c_first, + customer_middle: 
updated_customer.c_middle, + customer_last: updated_customer.c_last, + customer_balance_cents: updated_customer.c_balance_cents, + customer_credit: updated_customer.c_credit.clone(), + customer_discount_bps: updated_customer.c_discount_bps, + payment_amount_cents: req.payment_amount_cents, + customer_data: if updated_customer.c_credit == "BC" { + Some(updated_customer.c_data) + } else { + None + }, + }) +} + +fn order_status_tx( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + customer_selector: &CustomerSelector, +) -> Result { + let customer = resolve_customer(tx, w_id, d_id, customer_selector)?; + + let mut latest_order: Option = None; + for row in tx + .db + .oorder() + .by_w_d_c_o_id() + .filter((w_id, d_id, customer.c_id, 0u32..)) + { + latest_order = Some(row); + } + + let mut lines = Vec::new(); + if let Some(order) = &latest_order { + for line in tx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, order.o_id, 0u8..)) + { + lines.push(OrderStatusLineResult { + item_id: line.ol_i_id, + supply_w_id: line.ol_supply_w_id, + quantity: line.ol_quantity, + amount_cents: line.ol_amount_cents, + delivery_d: line.ol_delivery_d, + }); + } + } + + Ok(OrderStatusResult { + customer_id: customer.c_id, + customer_first: customer.c_first, + customer_middle: customer.c_middle, + customer_last: customer.c_last, + customer_balance_cents: customer.c_balance_cents, + order_id: latest_order.as_ref().map(|row| row.o_id), + order_entry_d: latest_order.as_ref().map(|row| row.o_entry_d), + carrier_id: latest_order.as_ref().and_then(|row| row.o_carrier_id), + lines, + }) +} + +fn stock_level_tx( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + threshold: i32, +) -> Result { + let district = find_district(tx, w_id, d_id)?; + let start_o_id = district.d_next_o_id.saturating_sub(20); + let end_o_id = district.d_next_o_id; + + let mut item_ids = BTreeSet::new(); + for line in tx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, 
start_o_id..end_o_id)) + { + item_ids.insert(line.ol_i_id); + } + + let mut low_stock_count = 0u32; + for item_id in item_ids { + let stock = find_stock(tx, w_id, item_id)?; + if stock.s_quantity < threshold { + low_stock_count += 1; + } + } + + Ok(StockLevelResult { + warehouse_id: w_id, + district_id: d_id, + threshold, + low_stock_count, + }) +} + +fn process_delivery_district( + ctx: &ReducerContext, + w_id: u16, + d_id: u8, + carrier_id: u8, + delivered_at: Timestamp, +) -> Result { + let maybe_new_order = ctx.db.new_order_row().by_w_d_o_id().filter((w_id, d_id, 0u32..)).next(); + let Some(new_order) = maybe_new_order else { + return Ok(false); + }; + + let order_key = pack_order_key(w_id, d_id, new_order.no_o_id); + let order = ctx + .db + .oorder() + .order_key() + .find(order_key) + .ok_or_else(|| "delivery referenced missing order".to_string())?; + + ctx.db.new_order_row().new_order_key().delete(new_order.new_order_key); + ctx.db.oorder().order_key().update(OOrder { + o_carrier_id: Some(carrier_id), + ..order.clone() + }); + + let mut total_amount_cents = 0i64; + let order_lines: Vec<_> = ctx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, order.o_id, 0u8..)) + .collect(); + for line in order_lines { + total_amount_cents += line.ol_amount_cents; + ctx.db.order_line().order_line_key().update(OrderLine { + ol_delivery_d: Some(delivered_at), + ..line + }); + } + + let customer = find_customer_by_id_reducer(ctx, w_id, d_id, order.o_c_id)?; + ctx.db.customer().customer_key().update(Customer { + c_balance_cents: customer.c_balance_cents + total_amount_cents, + c_delivery_cnt: customer.c_delivery_cnt + 1, + ..customer + }); + + Ok(true) +} + +fn resolve_customer( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + selector: &CustomerSelector, +) -> Result { + match selector { + CustomerSelector::ById(id) => find_customer_by_id(tx, w_id, d_id, *id), + CustomerSelector::ByLastName(last_name) => { + let rows: Vec<_> = tx + .db + .customer() + 
.by_w_d_last_first_id() + .filter((w_id, d_id, last_name.as_str(), ""..)) + .collect(); + ensure!(!rows.is_empty(), "customer not found"); + Ok(rows[(rows.len() - 1) / 2].clone()) + } + } +} + +fn find_warehouse(tx: &spacetimedb::TxContext, w_id: u16) -> Result { + tx.db + .warehouse() + .w_id() + .find(w_id) + .ok_or_else(|| format!("warehouse {w_id} not found")) +} + +fn ensure_warehouse_exists(tx: &spacetimedb::TxContext, w_id: u16) -> Result<(), String> { + find_warehouse(tx, w_id).map(|_| ()) +} + +fn find_district(tx: &spacetimedb::TxContext, w_id: u16, d_id: u8) -> Result { + tx.db + .district() + .by_w_d() + .filter((w_id, d_id)) + .next() + .ok_or_else(|| format!("district ({w_id}, {d_id}) not found")) +} + +fn find_customer_by_id(tx: &spacetimedb::TxContext, w_id: u16, d_id: u8, c_id: u32) -> Result { + tx.db + .customer() + .by_w_d_c_id() + .filter((w_id, d_id, c_id)) + .next() + .ok_or_else(|| format!("customer ({w_id}, {d_id}, {c_id}) not found")) +} + +fn find_customer_by_id_reducer(ctx: &ReducerContext, w_id: u16, d_id: u8, c_id: u32) -> Result { + ctx.db + .customer() + .by_w_d_c_id() + .filter((w_id, d_id, c_id)) + .next() + .ok_or_else(|| format!("customer ({w_id}, {d_id}, {c_id}) not found")) +} + +fn find_item(tx: &spacetimedb::TxContext, item_id: u32) -> Result { + tx.db + .item() + .i_id() + .find(item_id) + .ok_or_else(|| format!("item {item_id} not found")) +} + +fn find_stock(tx: &spacetimedb::TxContext, w_id: u16, item_id: u32) -> Result { + tx.db + .stock() + .by_w_i() + .filter((w_id, item_id)) + .next() + .ok_or_else(|| format!("stock ({w_id}, {item_id}) not found")) +} + +fn district_stock_info(stock: &Stock, d_id: u8) -> String { + match d_id { + 1 => stock.s_dist_01.clone(), + 2 => stock.s_dist_02.clone(), + 3 => stock.s_dist_03.clone(), + 4 => stock.s_dist_04.clone(), + 5 => stock.s_dist_05.clone(), + 6 => stock.s_dist_06.clone(), + 7 => stock.s_dist_07.clone(), + 8 => stock.s_dist_08.clone(), + 9 => stock.s_dist_09.clone(), + 10 => 
stock.s_dist_10.clone(), + _ => String::new(), + } +} + +fn contains_original(data: &str) -> bool { + data.contains("ORIGINAL") +} + +fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { + if current_quantity - ordered_quantity >= 10 { + current_quantity - ordered_quantity + } else { + current_quantity - ordered_quantity + 91 + } +} + +fn apply_tax(amount_cents: i64, total_tax_bps: i64) -> i64 { + amount_cents * (TAX_SCALE + total_tax_bps) / TAX_SCALE +} + +fn apply_discount(amount_cents: i64, discount_bps: i64) -> i64 { + amount_cents * (TAX_SCALE - discount_bps) / TAX_SCALE +} + +fn pack_district_key(w_id: u16, d_id: u8) -> u32 { + (u32::from(w_id) * 100) + u32::from(d_id) +} + +fn pack_customer_key(w_id: u16, d_id: u8, c_id: u32) -> u64 { + ((u64::from(w_id) * 100) + u64::from(d_id)) * 10_000 + u64::from(c_id) +} + +fn pack_stock_key(w_id: u16, item_id: u32) -> u64 { + u64::from(w_id) * 1_000_000 + u64::from(item_id) +} + +fn pack_order_key(w_id: u16, d_id: u8, o_id: u32) -> u64 { + ((u64::from(w_id) * 100) + u64::from(d_id)) * 10_000_000 + u64::from(o_id) +} + +fn pack_order_line_key(w_id: u16, d_id: u8, o_id: u32, ol_number: u8) -> u64 { + pack_order_key(w_id, d_id, o_id) * 100 + u64::from(ol_number) +} + +fn as_delivery_completion_view(row: DeliveryCompletion) -> DeliveryCompletionView { + DeliveryCompletionView { + completion_id: row.completion_id, + run_id: row.run_id, + driver_id: row.driver_id, + terminal_id: row.terminal_id, + request_id: row.request_id, + warehouse_id: row.warehouse_id, + carrier_id: row.carrier_id, + queued_at: row.queued_at, + completed_at: row.completed_at, + skipped_districts: row.skipped_districts, + processed_districts: row.processed_districts, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn middle_customer_selection_uses_lower_middle_for_even_count() { + let idx = (4usize - 1) / 2; + assert_eq!(idx, 1); + } + + #[test] + fn stock_quantity_wraps_like_tpcc() { + 
assert_eq!(adjust_stock_quantity(20, 5), 15); + assert_eq!(adjust_stock_quantity(10, 5), 96); + } + + #[test] + fn packing_roundtrips_expected_ranges() { + assert!(pack_customer_key(1, 1, 1) < pack_customer_key(1, 1, 2)); + assert!(pack_order_line_key(1, 1, 1, 1) < pack_order_line_key(1, 1, 1, 2)); + } +} diff --git a/tools/tpcc-runner/Cargo.toml b/tools/tpcc-runner/Cargo.toml new file mode 100644 index 00000000000..fdad0563564 --- /dev/null +++ b/tools/tpcc-runner/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "tpcc-runner" +version = "0.1.0" +edition.workspace = true + +[[bin]] +name = "tpcc-runner" +path = "src/main.rs" + +[dependencies] +anyhow.workspace = true +axum.workspace = true +clap.workspace = true +env_logger.workspace = true +log.workspace = true +parking_lot.workspace = true +rand.workspace = true +reqwest.workspace = true +serde.workspace = true +serde_json.workspace = true +tokio.workspace = true +toml.workspace = true + +spacetimedb-sdk = { path = "../../sdks/rust" } + +[lints] +workspace = true diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md new file mode 100644 index 00000000000..93a7106718e --- /dev/null +++ b/tools/tpcc-runner/README.md @@ -0,0 +1,95 @@ +# TPC-C Runner + +`tpcc-runner` is the Rust-side harness for the SpacetimeDB TPC-C module in `modules/tpcc`. + +It supports three subcommands: + +- `load`: populate the module with the initial TPC-C dataset +- `driver`: run one benchmark driver with one logical terminal per SDK connection +- `coordinator`: synchronize multiple remote drivers and aggregate their summaries + +## Local workflow + +1. Publish or start the `modules/tpcc` module. +2. Load data: + +```bash +cargo run -p tpcc-runner -- load --database tpcc --warehouses 1 +``` + +3. 
Run a single local driver: + +```bash +cargo run -p tpcc-runner -- driver --database tpcc --warehouses 1 --terminals 10 --warmup-secs 5 --measure-secs 30 +``` + +The driver writes: + +- `summary.json` +- `txn_events.ndjson` + +under `tpcc-results/<run_id>/<driver_id>/` unless `--output-dir` is provided. + +## Distributed workflow + +Start the coordinator: + +```bash +cargo run -p tpcc-runner -- coordinator --expected-drivers 2 --warmup-secs 5 --measure-secs 30 +``` + +Start each remote driver with disjoint terminal ranges: + +```bash +cargo run -p tpcc-runner -- driver --database tpcc --warehouses 2 --terminal-start 1 --terminals 10 --coordinator-url http://coordinator-host:7878 +cargo run -p tpcc-runner -- driver --database tpcc --warehouses 2 --terminal-start 11 --terminals 10 --coordinator-url http://coordinator-host:7878 +``` + +When all expected drivers register, the coordinator publishes a common schedule and writes an aggregated `summary.json` under `tpcc-results/coordinator/<run_id>/`. + +## Config file + +All subcommands accept `--config <path>`. The file is TOML with optional sections: + +```toml +[connection] +uri = "http://127.0.0.1:3000" +database = "tpcc" +confirmed_reads = true +timeout_secs = 30 + +[load] +warehouses = 1 +batch_size = 500 +reset = true + +[driver] +driver_id = "driver-a" +terminal_start = 1 +terminals = 10 +warehouses = 1 +warmup_secs = 5 +measure_secs = 30 +delivery_wait_secs = 60 +keying_time_scale = 1.0 +think_time_scale = 1.0 + +[coordinator] +run_id = "tpcc-demo" +listen = "127.0.0.1:7878" +expected_drivers = 2 +warmup_secs = 5 +measure_secs = 30 +output_dir = "tpcc-results/coordinator" +``` + +CLI flags override config-file values. 
+ +## Regenerating bindings + +If the module signatures change, regenerate the Rust SDK bindings: + +```bash +cargo build -p spacetimedb-standalone +cargo run -p spacetimedb-cli -- generate --lang rust --out-dir tools/tpcc-runner/src/module_bindings --module-path modules/tpcc --yes +``` diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs new file mode 100644 index 00000000000..01639c70eed --- /dev/null +++ b/tools/tpcc-runner/src/client.rs @@ -0,0 +1,332 @@ +use anyhow::{anyhow, bail, Context, Result}; +use std::sync::mpsc::sync_channel; +use std::thread::JoinHandle; +use std::time::Duration; + +use crate::config::ConnectionConfig; +use crate::module_bindings::*; +use spacetimedb_sdk::DbContext; + +pub struct ModuleClient { + conn: DbConnection, + thread: Option>, + timeout: Duration, +} + +impl ModuleClient { + pub fn connect(config: &ConnectionConfig) -> Result { + let (ready_tx, ready_rx) = sync_channel(1); + let success_tx = ready_tx.clone(); + let error_tx = ready_tx; + let mut builder = DbConnection::builder() + .with_uri(config.uri.clone()) + .with_database_name(config.database.clone()) + .with_confirmed_reads(config.confirmed_reads) + .on_connect(move |_, _, _| { + let _ = success_tx.send(Ok::<(), anyhow::Error>(())); + }) + .on_connect_error(move |_, error| { + let _ = error_tx.send(Err(anyhow!("connection failed: {error}"))); + }); + + if let Some(token) = &config.token { + builder = builder.with_token(Some(token.clone())); + } + + let conn = builder.build().context("failed to build database connection")?; + let thread = conn.run_threaded(); + ready_rx + .recv_timeout(Duration::from_secs(config.timeout_secs)) + .context("timed out waiting for connection")??; + + Ok(Self { + conn, + thread: Some(thread), + timeout: Duration::from_secs(config.timeout_secs), + }) + } + + pub fn reset_tpcc(&self) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.reset_tpcc_then(move |_, res| { + let _ = tx.send(res); + })?; + 
match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("reset_tpcc failed: {}", message), + Ok(Err(err)) => Err(anyhow!("reset_tpcc internal error: {}", err)), + Err(_) => bail!("timed out waiting for reset_tpcc"), + } + } + + pub fn load_warehouses(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_warehouses_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_warehouses failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_warehouses internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_warehouses"), + } + } + + pub fn load_districts(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_districts_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_districts failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_districts internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_districts"), + } + } + + pub fn load_customers(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_customers_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_customers failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_customers internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_customers"), + } + } + + pub fn load_history(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_history_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_history failed: {}", message), + 
Ok(Err(err)) => Err(anyhow!("load_history internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_history"), + } + } + + pub fn load_items(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_items_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_items failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_items internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_items"), + } + } + + pub fn load_stocks(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_stocks_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_stocks failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_stocks internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_stocks"), + } + } + + pub fn load_orders(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_orders_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_orders failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_orders internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_orders"), + } + } + + pub fn load_new_orders(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_new_orders_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_new_orders failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_new_orders internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_new_orders"), + } + } + + pub fn load_order_lines(&self, 
rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_order_lines_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_order_lines failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_order_lines internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_order_lines"), + } + } + + pub fn new_order( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + ) -> Result> { + let (tx, rx) = sync_channel(1); + self.conn + .procedures + .new_order_then(w_id, d_id, c_id, order_lines, move |_, res| { + let _ = tx.send(res); + }); + match rx.recv_timeout(self.timeout) { + Ok(Ok(value)) => Ok(value), + Ok(Err(err)) => Err(anyhow!("new_order internal error: {}", err)), + Err(_) => bail!("timed out waiting for new_order"), + } + } + + pub fn payment( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + ) -> Result> { + let (tx, rx) = sync_channel(1); + self.conn.procedures.payment_then( + w_id, + d_id, + c_w_id, + c_d_id, + customer, + payment_amount_cents, + move |_, res| { + let _ = tx.send(res); + }, + ); + match rx.recv_timeout(self.timeout) { + Ok(Ok(value)) => Ok(value), + Ok(Err(err)) => Err(anyhow!("payment internal error: {}", err)), + Err(_) => bail!("timed out waiting for payment"), + } + } + + pub fn order_status( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + ) -> Result> { + let (tx, rx) = sync_channel(1); + self.conn + .procedures + .order_status_then(w_id, d_id, customer, move |_, res| { + let _ = tx.send(res); + }); + match rx.recv_timeout(self.timeout) { + Ok(Ok(value)) => Ok(value), + Ok(Err(err)) => Err(anyhow!("order_status internal error: {}", err)), + Err(_) => bail!("timed out waiting for order_status"), + } + } + + pub fn stock_level(&self, w_id: u16, d_id: u8, threshold: i32) -> Result> { + 
let (tx, rx) = sync_channel(1); + self.conn + .procedures + .stock_level_then(w_id, d_id, threshold, move |_, res| { + let _ = tx.send(res); + }); + match rx.recv_timeout(self.timeout) { + Ok(Ok(value)) => Ok(value), + Ok(Err(err)) => Err(anyhow!("stock_level internal error: {}", err)), + Err(_) => bail!("timed out waiting for stock_level"), + } + } + + pub fn queue_delivery( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + ) -> Result> { + let (tx, rx) = sync_channel(1); + self.conn.procedures.queue_delivery_then( + run_id, + driver_id, + terminal_id, + request_id, + w_id, + carrier_id, + move |_, res| { + let _ = tx.send(res); + }, + ); + match rx.recv_timeout(self.timeout) { + Ok(Ok(value)) => Ok(value), + Ok(Err(err)) => Err(anyhow!("queue_delivery internal error: {}", err)), + Err(_) => bail!("timed out waiting for queue_delivery"), + } + } + + pub fn delivery_progress(&self, run_id: String) -> Result> { + let (tx, rx) = sync_channel(1); + self.conn.procedures.delivery_progress_then(run_id, move |_, res| { + let _ = tx.send(res); + }); + match rx.recv_timeout(self.timeout) { + Ok(Ok(value)) => Ok(value), + Ok(Err(err)) => Err(anyhow!("delivery_progress internal error: {}", err)), + Err(_) => bail!("timed out waiting for delivery_progress"), + } + } + + pub fn fetch_delivery_completions( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + ) -> Result, String>> { + let (tx, rx) = sync_channel(1); + self.conn + .procedures + .fetch_delivery_completions_then(run_id, after_completion_id, limit, move |_, res| { + let _ = tx.send(res); + }); + match rx.recv_timeout(self.timeout) { + Ok(Ok(value)) => Ok(value), + Ok(Err(err)) => Err(anyhow!("fetch_delivery_completions internal error: {}", err)), + Err(_) => bail!("timed out waiting for fetch_delivery_completions"), + } + } + + pub fn shutdown(mut self) { + let _ = self.conn.disconnect(); + if let Some(thread) = self.thread.take() 
{ + let _ = thread.join(); + } + } +} + +pub fn expect_ok(operation: &str, result: Result>) -> Result { + match result? { + Ok(value) => Ok(value), + Err(message) => bail!("{} failed: {}", operation, message), + } +} diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs new file mode 100644 index 00000000000..4042cb85f59 --- /dev/null +++ b/tools/tpcc-runner/src/config.rs @@ -0,0 +1,306 @@ +use anyhow::{bail, Context, Result}; +use clap::{Args, Parser, Subcommand}; +use serde::Deserialize; +use std::fs; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; + +#[derive(Debug, Parser)] +#[command(name = "tpcc-runner")] +pub struct Cli { + #[arg(long)] + pub config: Option, + #[command(subcommand)] + pub command: Command, +} + +#[derive(Debug, Subcommand)] +pub enum Command { + Load(LoadArgs), + Driver(DriverArgs), + Coordinator(CoordinatorArgs), +} + +#[derive(Debug, Clone)] +pub struct ConnectionConfig { + pub uri: String, + pub database: String, + pub token: Option, + pub confirmed_reads: bool, + pub timeout_secs: u64, +} + +#[derive(Debug, Clone)] +pub struct LoadConfig { + pub connection: ConnectionConfig, + pub warehouses: u16, + pub batch_size: usize, + pub reset: bool, +} + +#[derive(Debug, Clone)] +pub struct DriverConfig { + pub connection: ConnectionConfig, + pub run_id: Option, + pub driver_id: String, + pub terminal_start: u32, + pub terminals: u32, + pub warehouse_count: u16, + pub warmup_secs: u64, + pub measure_secs: u64, + pub output_dir: Option, + pub coordinator_url: Option, + pub delivery_wait_secs: u64, + pub keying_time_scale: f64, + pub think_time_scale: f64, +} + +#[derive(Debug, Clone)] +pub struct CoordinatorConfig { + pub run_id: String, + pub listen: SocketAddr, + pub expected_drivers: usize, + pub warmup_secs: u64, + pub measure_secs: u64, + pub output_dir: PathBuf, +} + +#[derive(Debug, Clone, Args)] +pub struct LoadArgs { + #[command(flatten)] + pub connection: ConnectionArgs, + #[arg(long)] + pub 
warehouses: Option, + #[arg(long)] + pub batch_size: Option, + #[arg(long)] + pub reset: Option, +} + +#[derive(Debug, Clone, Args)] +pub struct DriverArgs { + #[command(flatten)] + pub connection: ConnectionArgs, + #[arg(long)] + pub run_id: Option, + #[arg(long)] + pub driver_id: Option, + #[arg(long)] + pub terminal_start: Option, + #[arg(long)] + pub terminals: Option, + #[arg(long)] + pub warehouses: Option, + #[arg(long)] + pub warmup_secs: Option, + #[arg(long)] + pub measure_secs: Option, + #[arg(long)] + pub output_dir: Option, + #[arg(long)] + pub coordinator_url: Option, + #[arg(long)] + pub delivery_wait_secs: Option, + #[arg(long)] + pub keying_time_scale: Option, + #[arg(long)] + pub think_time_scale: Option, +} + +#[derive(Debug, Clone, Args)] +pub struct CoordinatorArgs { + #[arg(long)] + pub run_id: Option, + #[arg(long)] + pub listen: Option, + #[arg(long)] + pub expected_drivers: Option, + #[arg(long)] + pub warmup_secs: Option, + #[arg(long)] + pub measure_secs: Option, + #[arg(long)] + pub output_dir: Option, +} + +#[derive(Debug, Clone, Default, Args)] +pub struct ConnectionArgs { + #[arg(long)] + pub uri: Option, + #[arg(long)] + pub database: Option, + #[arg(long)] + pub token: Option, + #[arg(long)] + pub confirmed_reads: Option, + #[arg(long)] + pub timeout_secs: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct FileConfig { + #[serde(default)] + connection: FileConnectionConfig, + #[serde(default)] + load: FileLoadConfig, + #[serde(default)] + driver: FileDriverConfig, + #[serde(default)] + coordinator: FileCoordinatorConfig, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct FileConnectionConfig { + uri: Option, + database: Option, + token: Option, + confirmed_reads: Option, + timeout_secs: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct FileLoadConfig { + warehouses: Option, + batch_size: Option, + reset: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct 
FileDriverConfig { + run_id: Option, + driver_id: Option, + terminal_start: Option, + terminals: Option, + warehouses: Option, + warmup_secs: Option, + measure_secs: Option, + output_dir: Option, + coordinator_url: Option, + delivery_wait_secs: Option, + keying_time_scale: Option, + think_time_scale: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct FileCoordinatorConfig { + run_id: Option, + listen: Option, + expected_drivers: Option, + warmup_secs: Option, + measure_secs: Option, + output_dir: Option, +} + +impl FileConfig { + pub fn load(path: Option<&Path>) -> Result { + let Some(path) = path else { + return Ok(Self::default()); + }; + let raw = fs::read_to_string(path).with_context(|| format!("failed to read config {}", path.display()))?; + toml::from_str(&raw).with_context(|| format!("failed to parse config {}", path.display())) + } +} + +impl ConnectionArgs { + fn resolve(&self, file: &FileConnectionConfig) -> ConnectionConfig { + ConnectionConfig { + uri: self + .uri + .clone() + .or_else(|| file.uri.clone()) + .unwrap_or_else(|| "http://127.0.0.1:3000".to_string()), + database: self + .database + .clone() + .or_else(|| file.database.clone()) + .unwrap_or_else(|| "tpcc".to_string()), + token: self.token.clone().or_else(|| file.token.clone()), + confirmed_reads: self.confirmed_reads.or(file.confirmed_reads).unwrap_or(true), + timeout_secs: self.timeout_secs.or(file.timeout_secs).unwrap_or(30), + } + } +} + +impl LoadArgs { + pub fn resolve(&self, file: &FileConfig) -> LoadConfig { + LoadConfig { + connection: self.connection.resolve(&file.connection), + warehouses: self.warehouses.or(file.load.warehouses).unwrap_or(1), + batch_size: self.batch_size.or(file.load.batch_size).unwrap_or(500), + reset: self.reset.or(file.load.reset).unwrap_or(true), + } + } +} + +impl DriverArgs { + pub fn resolve(&self, file: &FileConfig) -> Result { + let connection = self.connection.resolve(&file.connection); + let warehouse_count = 
self.warehouses.or(file.driver.warehouses).unwrap_or(1); + let terminals = self + .terminals + .or(file.driver.terminals) + .unwrap_or(u32::from(warehouse_count) * 10); + let terminal_start = self.terminal_start.or(file.driver.terminal_start).unwrap_or(1); + if terminals == 0 { + bail!("terminal count must be positive"); + } + Ok(DriverConfig { + connection, + run_id: self.run_id.clone().or_else(|| file.driver.run_id.clone()), + driver_id: self + .driver_id + .clone() + .or_else(|| file.driver.driver_id.clone()) + .unwrap_or_else(default_driver_id), + terminal_start, + terminals, + warehouse_count, + warmup_secs: self.warmup_secs.or(file.driver.warmup_secs).unwrap_or(5), + measure_secs: self.measure_secs.or(file.driver.measure_secs).unwrap_or(30), + output_dir: self.output_dir.clone().or_else(|| file.driver.output_dir.clone()), + coordinator_url: self + .coordinator_url + .clone() + .or_else(|| file.driver.coordinator_url.clone()), + delivery_wait_secs: self.delivery_wait_secs.or(file.driver.delivery_wait_secs).unwrap_or(60), + keying_time_scale: self.keying_time_scale.or(file.driver.keying_time_scale).unwrap_or(1.0), + think_time_scale: self.think_time_scale.or(file.driver.think_time_scale).unwrap_or(1.0), + }) + } +} + +impl CoordinatorArgs { + pub fn resolve(&self, file: &FileConfig) -> Result { + let expected_drivers = self.expected_drivers.or(file.coordinator.expected_drivers).unwrap_or(1); + if expected_drivers == 0 { + bail!("expected_drivers must be positive"); + } + Ok(CoordinatorConfig { + run_id: self + .run_id + .clone() + .or_else(|| file.coordinator.run_id.clone()) + .unwrap_or_else(default_run_id), + listen: self + .listen + .or(file.coordinator.listen) + .unwrap_or_else(|| "127.0.0.1:7878".parse().expect("hard-coded coordinator address")), + expected_drivers, + warmup_secs: self.warmup_secs.or(file.coordinator.warmup_secs).unwrap_or(5), + measure_secs: self.measure_secs.or(file.coordinator.measure_secs).unwrap_or(30), + output_dir: self + 
.output_dir + .clone() + .or_else(|| file.coordinator.output_dir.clone()) + .unwrap_or_else(|| PathBuf::from("tpcc-results/coordinator")), + }) + } +} + +pub fn default_run_id() -> String { + format!("tpcc-{}", crate::summary::now_millis()) +} + +pub fn default_driver_id() -> String { + format!("driver-{}", std::process::id()) +} diff --git a/tools/tpcc-runner/src/coordinator.rs b/tools/tpcc-runner/src/coordinator.rs new file mode 100644 index 00000000000..f9060f5520d --- /dev/null +++ b/tools/tpcc-runner/src/coordinator.rs @@ -0,0 +1,125 @@ +use anyhow::{Context, Result}; +use axum::extract::State; +use axum::routing::{get, post}; +use axum::{Json, Router}; +use parking_lot::Mutex; +use std::collections::BTreeMap; +use std::fs; +use std::path::Path; +use std::sync::Arc; + +use crate::config::CoordinatorConfig; +use crate::protocol::{ + RegisterDriverRequest, RegisterDriverResponse, RunSchedule, ScheduleResponse, SubmitSummaryRequest, +}; +use crate::summary::{aggregate_summaries, now_millis, write_json, AggregateSummary, DriverSummary}; + +#[derive(Clone)] +struct AppState { + inner: Arc>, +} + +struct CoordinatorState { + config: CoordinatorConfig, + registrations: BTreeMap, + schedule: Option, + summaries: BTreeMap, +} + +pub async fn run(config: CoordinatorConfig) -> Result<()> { + fs::create_dir_all(&config.output_dir) + .with_context(|| format!("failed to create {}", config.output_dir.display()))?; + + let state = AppState { + inner: Arc::new(Mutex::new(CoordinatorState { + config: config.clone(), + registrations: BTreeMap::new(), + schedule: None, + summaries: BTreeMap::new(), + })), + }; + + let app = Router::new() + .route("/register", post(register_driver)) + .route("/schedule", get(get_schedule)) + .route("/summary", post(submit_summary)) + .with_state(state); + + let listener = tokio::net::TcpListener::bind(config.listen) + .await + .with_context(|| format!("failed to bind {}", config.listen))?; + log::info!("coordinator listening on {}", 
config.listen); + axum::serve(listener, app).await.context("coordinator server exited") +} + +async fn register_driver( + State(state): State, + Json(request): Json, +) -> Json { + let mut inner = state.inner.lock(); + inner.registrations.insert(request.driver_id.clone(), request); + maybe_create_schedule(&mut inner); + Json(RegisterDriverResponse { accepted: true }) +} + +async fn get_schedule(State(state): State) -> Json { + let inner = state.inner.lock(); + Json(ScheduleResponse { + ready: inner.schedule.is_some(), + schedule: inner.schedule.clone(), + }) +} + +async fn submit_summary( + State(state): State, + Json(request): Json, +) -> Result, axum::http::StatusCode> { + let aggregate = { + let mut inner = state.inner.lock(); + inner + .summaries + .insert(request.summary.driver_id.clone(), request.summary.clone()); + if inner.summaries.len() == inner.config.expected_drivers { + let summaries: Vec<_> = inner.summaries.values().cloned().collect(); + let aggregate = aggregate_summaries(inner.config.run_id.clone(), &summaries); + if let Err(err) = write_aggregate(&inner.config.output_dir, &aggregate) { + log::error!("failed to write aggregate summary: {err:#}"); + return Err(axum::http::StatusCode::INTERNAL_SERVER_ERROR); + } + aggregate + } else { + aggregate_summaries( + inner.config.run_id.clone(), + &inner.summaries.values().cloned().collect::>(), + ) + } + }; + Ok(Json(aggregate)) +} + +fn maybe_create_schedule(inner: &mut CoordinatorState) { + if inner.schedule.is_some() || inner.registrations.len() < inner.config.expected_drivers { + return; + } + let warmup_start_ms = now_millis() + 2_000; + let measure_start_ms = warmup_start_ms + (inner.config.warmup_secs * 1_000); + let measure_end_ms = measure_start_ms + (inner.config.measure_secs * 1_000); + inner.schedule = Some(RunSchedule { + run_id: inner.config.run_id.clone(), + warmup_start_ms, + measure_start_ms, + measure_end_ms, + stop_ms: measure_end_ms, + }); + log::info!( + "all {} driver(s) registered; 
schedule ready for run {}", + inner.config.expected_drivers, + inner.config.run_id + ); +} + +fn write_aggregate(output_dir: &Path, aggregate: &AggregateSummary) -> Result<()> { + let run_dir = output_dir.join(&aggregate.run_id); + fs::create_dir_all(&run_dir).with_context(|| format!("failed to create {}", run_dir.display()))?; + write_json(&run_dir.join("summary.json"), aggregate) +} diff --git a/tools/tpcc-runner/src/driver.rs b/tools/tpcc-runner/src/driver.rs new file mode 100644 index 00000000000..dd7086db45f --- /dev/null +++ b/tools/tpcc-runner/src/driver.rs @@ -0,0 +1,619 @@ +use anyhow::{anyhow, bail, Context, Result}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::fs; +use std::path::{Path, PathBuf}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::sync::Arc; +use std::thread; +use std::time::Duration; + +use crate::client::{expect_ok, ModuleClient}; +use crate::config::{default_run_id, DriverConfig}; +use crate::module_bindings::*; +use crate::protocol::{ + RegisterDriverRequest, RegisterDriverResponse, RunSchedule, ScheduleResponse, SubmitSummaryRequest, +}; +use crate::summary::{write_json, DriverSummary, DriverSummaryMeta, SharedMetrics, TransactionKind, TransactionRecord}; +use crate::tpcc::*; + +struct TerminalRuntime { + config: DriverConfig, + metrics: SharedMetrics, + abort: Arc, + request_ids: Arc, + schedule: RunSchedule, + run_constants: RunConstants, + assignment: TerminalAssignment, + seed: u64, +} + +struct TransactionContext<'a> { + client: &'a ModuleClient, + config: &'a DriverConfig, + run_id: &'a str, + driver_id: &'a str, + assignment: &'a TerminalAssignment, + constants: &'a RunConstants, + request_ids: &'a AtomicU64, +} + +pub async fn run(config: DriverConfig) -> Result<()> { + let schedule = resolve_schedule(&config).await?; + let run_id = schedule.run_id.clone(); + let output_dir = resolve_output_dir(&config, &run_id); + fs::create_dir_all(&output_dir).with_context(|| format!("failed to create {}", 
output_dir.display()))?; + + let events_path = output_dir.join("txn_events.ndjson"); + let summary_path = output_dir.join("summary.json"); + let metrics = SharedMetrics::create(&run_id, &config.driver_id, &events_path)?; + + let run_constants = { + let mut rng = StdRng::seed_from_u64(schedule.measure_start_ms ^ u64::from(config.terminal_start)); + generate_run_constants(&mut rng) + }; + + let abort = Arc::new(AtomicBool::new(false)); + let request_ids = Arc::new(AtomicU64::new(1)); + let mut handles = Vec::with_capacity(config.terminals as usize); + + for offset in 0..config.terminals { + let terminal_id = config.terminal_start + offset; + let assignment = assign_terminal(terminal_id, config.warehouse_count).ok_or_else(|| { + anyhow!( + "terminal {} exceeds warehouse capacity {}", + terminal_id, + config.warehouse_count + ) + })?; + let terminal_seed = schedule.measure_start_ms ^ ((terminal_id as u64) << 32) ^ 0xabcdu64; + let terminal_config = config.clone(); + let terminal_metrics = metrics.clone(); + let terminal_abort = abort.clone(); + let terminal_constants = run_constants.clone(); + let terminal_schedule = schedule.clone(); + let terminal_request_ids = request_ids.clone(); + let runtime = TerminalRuntime { + config: terminal_config, + metrics: terminal_metrics, + abort: terminal_abort, + request_ids: terminal_request_ids, + schedule: terminal_schedule, + run_constants: terminal_constants, + assignment, + seed: terminal_seed, + }; + handles.push(thread::spawn(move || run_terminal(runtime))); + } + + let mut first_error: Option = None; + for handle in handles { + match handle.join() { + Ok(Ok(())) => {} + Ok(Err(err)) => { + abort.store(true, Ordering::Relaxed); + if first_error.is_none() { + first_error = Some(err); + } + } + Err(_) => { + abort.store(true, Ordering::Relaxed); + if first_error.is_none() { + first_error = Some(anyhow!("terminal thread panicked")); + } + } + } + } + if let Some(err) = first_error { + return Err(err); + } + + 
harvest_delivery_completions(&config, &schedule, &metrics).await?; + + let summary = metrics.finalize(DriverSummaryMeta { + run_id: run_id.clone(), + driver_id: config.driver_id.clone(), + uri: config.connection.uri.clone(), + database: config.connection.database.clone(), + terminal_start: config.terminal_start, + terminals: config.terminals, + warehouse_count: config.warehouse_count, + warmup_secs: config.warmup_secs, + measure_secs: config.measure_secs, + measure_start_ms: schedule.measure_start_ms, + measure_end_ms: schedule.measure_end_ms, + })?; + write_json(&summary_path, &summary)?; + print_summary(&summary, &summary_path, &events_path); + + if let Some(coordinator_url) = &config.coordinator_url { + submit_summary(coordinator_url, summary).await?; + } + + Ok(()) +} + +fn run_terminal(runtime: TerminalRuntime) -> Result<()> { + let TerminalRuntime { + config, + metrics, + abort, + request_ids, + schedule, + run_constants, + assignment, + seed, + } = runtime; + let client = ModuleClient::connect(&config.connection)?; + sleep_until_ms(schedule.warmup_start_ms); + + let mut rng = StdRng::seed_from_u64(seed); + while !abort.load(Ordering::Relaxed) { + if crate::summary::now_millis() >= schedule.stop_ms { + break; + } + + let kind = choose_transaction(&mut rng); + let started_ms = crate::summary::now_millis(); + let context = TransactionContext { + client: &client, + config: &config, + run_id: &schedule.run_id, + driver_id: &config.driver_id, + assignment: &assignment, + constants: &run_constants, + request_ids: &request_ids, + }; + let event = execute_transaction(&context, kind, &mut rng, started_ms); + + match event { + Ok(record) => { + if record.timestamp_ms >= schedule.measure_start_ms && record.timestamp_ms < schedule.measure_end_ms { + metrics.record(record)?; + } + } + Err(err) => { + abort.store(true, Ordering::Relaxed); + client.shutdown(); + return Err(err); + } + } + + let delay = keying_time(kind, config.keying_time_scale) + think_time(kind, 
config.think_time_scale, &mut rng); + if !delay.is_zero() && crate::summary::now_millis() < schedule.stop_ms { + thread::sleep(delay); + } + } + + client.shutdown(); + Ok(()) +} + +fn execute_transaction( + context: &TransactionContext<'_>, + kind: TransactionKind, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + match kind { + TransactionKind::NewOrder => execute_new_order( + context.client, + context.config.warehouse_count, + context.assignment, + context.constants, + rng, + started_ms, + ), + TransactionKind::Payment => execute_payment( + context.client, + context.config.warehouse_count, + context.assignment, + context.constants, + rng, + started_ms, + ), + TransactionKind::OrderStatus => { + execute_order_status(context.client, context.assignment, context.constants, rng, started_ms) + } + TransactionKind::Delivery => execute_delivery( + context.client, + context.run_id, + context.driver_id, + context.assignment, + context.request_ids, + rng, + started_ms, + ), + TransactionKind::StockLevel => execute_stock_level(context.client, context.assignment, rng, started_ms), + } +} + +fn execute_new_order( + client: &ModuleClient, + warehouse_count: u16, + assignment: &TerminalAssignment, + constants: &RunConstants, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let customer_id = customer_id(rng, constants); + let line_count = rng.random_range(5..=15); + let invalid_line = rng.random_bool(0.01); + let mut order_lines = Vec::with_capacity(line_count); + let mut remote_order_line_count = 0u32; + for idx in 0..line_count { + let remote = warehouse_count > 1 && rng.random_bool(0.01); + let supply_w_id = if remote { + remote_order_line_count += 1; + let mut remote = assignment.warehouse_id; + while remote == assignment.warehouse_id { + remote = rng.random_range(1..=warehouse_count); + } + remote + } else { + assignment.warehouse_id + }; + let item_id = if invalid_line && idx + 1 == line_count { + ITEMS + 1 + } else { + item_id(rng, constants) + }; + 
order_lines.push(NewOrderLineInput { + item_id, + supply_w_id, + quantity: rng.random_range(1..=10), + }); + } + + let result = client.new_order( + assignment.warehouse_id, + assignment.district_id, + customer_id, + order_lines, + )?; + let finished_ms = crate::summary::now_millis(); + match result { + Ok(_) => Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::NewOrder, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name: false, + order_line_count: line_count as u32, + remote_order_line_count, + detail: None, + }), + Err(message) if invalid_line => Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::NewOrder, + success: false, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: true, + remote: false, + by_last_name: false, + order_line_count: line_count as u32, + remote_order_line_count, + detail: Some(message), + }), + Err(message) => bail!( + "unexpected new_order failure for terminal {}: {}", + assignment.terminal_id, + message + ), + } +} + +fn execute_payment( + client: &ModuleClient, + warehouse_count: u16, + assignment: &TerminalAssignment, + constants: &RunConstants, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let remote = warehouse_count > 1 && rng.random_bool(0.15); + let c_w_id = if remote { + let mut other = assignment.warehouse_id; + while other == assignment.warehouse_id { + other = rng.random_range(1..=warehouse_count); + } + other + } else { + assignment.warehouse_id + }; + let c_d_id = if remote { + rng.random_range(1..=DISTRICTS_PER_WAREHOUSE) + } else { + assignment.district_id + }; + let by_last_name = rng.random_bool(0.60); + let selector = if by_last_name { + CustomerSelector::ByLastName(customer_last_name(rng, constants)) + } else { + CustomerSelector::ById(customer_id(rng, constants)) + }; + let amount_cents = 
rng.random_range(100..=500_000); + let finished = expect_ok( + "payment", + client.payment( + assignment.warehouse_id, + assignment.district_id, + c_w_id, + c_d_id, + selector, + amount_cents, + ), + )?; + let _ = finished; + let finished_ms = crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::Payment, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote, + by_last_name, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +fn execute_order_status( + client: &ModuleClient, + assignment: &TerminalAssignment, + constants: &RunConstants, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let by_last_name = rng.random_bool(0.60); + let selector = if by_last_name { + CustomerSelector::ByLastName(customer_last_name(rng, constants)) + } else { + CustomerSelector::ById(customer_id(rng, constants)) + }; + let _ = expect_ok( + "order_status", + client.order_status(assignment.warehouse_id, assignment.district_id, selector), + )?; + let finished_ms = crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::OrderStatus, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +fn execute_delivery( + client: &ModuleClient, + run_id: &str, + driver_id: &str, + assignment: &TerminalAssignment, + request_ids: &AtomicU64, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let request_id = request_ids.fetch_add(1, Ordering::Relaxed); + let _ = expect_ok( + "queue_delivery", + client.queue_delivery( + run_id.to_string(), + driver_id.to_string(), + assignment.terminal_id, + request_id, + assignment.warehouse_id, + rng.random_range(1..=10), + ), + )?; + let finished_ms = 
crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::Delivery, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name: false, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +fn execute_stock_level( + client: &ModuleClient, + assignment: &TerminalAssignment, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let threshold = rng.random_range(10..=20); + let _ = expect_ok( + "stock_level", + client.stock_level(assignment.warehouse_id, assignment.district_id, threshold), + )?; + let finished_ms = crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::StockLevel, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name: false, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +async fn resolve_schedule(config: &DriverConfig) -> Result { + if let Some(coordinator_url) = &config.coordinator_url { + let client = reqwest::Client::new(); + let register = RegisterDriverRequest { + driver_id: config.driver_id.clone(), + terminal_start: config.terminal_start, + terminals: config.terminals, + warehouse_count: config.warehouse_count, + }; + let response: RegisterDriverResponse = client + .post(format!("{}/register", coordinator_url)) + .json(®ister) + .send() + .await + .context("failed to register driver with coordinator")? + .error_for_status() + .context("coordinator rejected register request")? 
+ .json() + .await + .context("failed to decode register response")?; + if !response.accepted { + bail!("coordinator did not accept driver registration"); + } + loop { + let response: ScheduleResponse = client + .get(format!("{}/schedule", coordinator_url)) + .send() + .await + .context("failed to poll coordinator schedule")? + .error_for_status() + .context("coordinator schedule endpoint returned error")? + .json() + .await + .context("failed to decode schedule response")?; + if let Some(schedule) = response.schedule { + return Ok(schedule); + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + } + + let run_id = config.run_id.clone().unwrap_or_else(default_run_id); + let warmup_start_ms = crate::summary::now_millis() + 2_000; + let measure_start_ms = warmup_start_ms + (config.warmup_secs * 1_000); + let measure_end_ms = measure_start_ms + (config.measure_secs * 1_000); + Ok(RunSchedule { + run_id, + warmup_start_ms, + measure_start_ms, + measure_end_ms, + stop_ms: measure_end_ms, + }) +} + +async fn harvest_delivery_completions( + config: &DriverConfig, + schedule: &RunSchedule, + metrics: &SharedMetrics, +) -> Result<()> { + let expected = metrics.delivery_queued(); + if expected == 0 { + return Ok(()); + } + let client = ModuleClient::connect(&config.connection)?; + let progress = expect_ok("delivery_progress", client.delivery_progress(schedule.run_id.clone()))?; + log::info!( + "delivery progress before harvest: pending_jobs={} completed_jobs={}", + progress.pending_jobs, + progress.completed_jobs + ); + let deadline = crate::summary::now_millis() + (config.delivery_wait_secs * 1_000); + let mut seen_for_driver = 0u64; + let mut after_completion_id = 0u64; + + loop { + if seen_for_driver >= expected { + break; + } + let batch = expect_ok( + "fetch_delivery_completions", + client.fetch_delivery_completions(schedule.run_id.clone(), after_completion_id, 512), + )?; + if batch.is_empty() { + if crate::summary::now_millis() >= deadline { + break; + } + 
tokio::time::sleep(Duration::from_millis(250)).await; + continue; + } + for row in batch { + after_completion_id = after_completion_id.max(row.completion_id); + if row.driver_id == config.driver_id { + seen_for_driver += 1; + metrics.record_delivery_completion(&row); + } + } + } + + if seen_for_driver < expected { + log::warn!( + "driver {} observed only {} / {} delivery completions before timeout", + config.driver_id, + seen_for_driver, + expected + ); + } + + client.shutdown(); + Ok(()) +} + +async fn submit_summary(coordinator_url: &str, summary: DriverSummary) -> Result<()> { + let client = reqwest::Client::new(); + client + .post(format!("{}/summary", coordinator_url)) + .json(&SubmitSummaryRequest { summary }) + .send() + .await + .context("failed to submit summary to coordinator")? + .error_for_status() + .context("coordinator rejected summary")?; + Ok(()) +} + +fn resolve_output_dir(config: &DriverConfig, run_id: &str) -> PathBuf { + match &config.output_dir { + Some(path) => path.clone(), + None => PathBuf::from("tpcc-results").join(run_id).join(&config.driver_id), + } +} + +fn print_summary(summary: &DriverSummary, summary_path: &Path, events_path: &Path) { + log::info!("run_id={}", summary.run_id); + log::info!("driver_id={}", summary.driver_id); + log::info!("tpmc_like={:.2}", summary.tpmc_like); + log::info!("total_transactions={}", summary.total_transactions); + for (name, txn) in &summary.transactions { + log::info!( + "{} count={} success={} failure={} p95_ms={} p99_ms={}", + name, + txn.count, + txn.success, + txn.failure, + txn.p95_latency_ms, + txn.p99_latency_ms + ); + } + log::info!( + "delivery queued={} completed={} pending={}", + summary.delivery.queued, + summary.delivery.completed, + summary.delivery.pending + ); + log::info!("summary={}", summary_path.display()); + log::info!("events={}", events_path.display()); +} diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs new file mode 100644 index 
00000000000..66d3304a7a6 --- /dev/null +++ b/tools/tpcc-runner/src/loader.rs @@ -0,0 +1,307 @@ +use anyhow::{Context, Result}; +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use std::time::SystemTime; + +use crate::client::ModuleClient; +use crate::config::LoadConfig; +use crate::module_bindings::*; +use crate::tpcc::*; +use spacetimedb_sdk::Timestamp; + +const WAREHOUSE_YTD_CENTS: i64 = 30_000_000; +const DISTRICT_YTD_CENTS: i64 = 3_000_000; +const CUSTOMER_CREDIT_LIMIT_CENTS: i64 = 5_000_000; +const CUSTOMER_INITIAL_BALANCE_CENTS: i64 = -1_000; +const CUSTOMER_INITIAL_YTD_PAYMENT_CENTS: i64 = 1_000; +const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; + +pub fn run(config: LoadConfig) -> Result<()> { + log::info!( + "loading tpcc dataset into {} / {} with {} warehouse(s)", + config.connection.uri, + config.connection.database, + config.warehouses + ); + let client = ModuleClient::connect(&config.connection)?; + if config.reset { + client.reset_tpcc().context("failed to reset tpcc data")?; + } + + let mut rng = StdRng::seed_from_u64(0x5eed_5eed); + let load_c_last = rng.random_range(0..=255); + let base_ts = Timestamp::from(SystemTime::now()); + + load_items(&client, config.batch_size, &mut rng)?; + load_warehouses_and_districts(&client, config.warehouses, config.batch_size, base_ts, &mut rng)?; + load_stock(&client, config.warehouses, config.batch_size, &mut rng)?; + load_customers_history_orders( + &client, + config.warehouses, + config.batch_size, + base_ts, + load_c_last, + &mut rng, + )?; + + client.shutdown(); + log::info!("tpcc load finished"); + Ok(()) +} + +fn load_items(client: &ModuleClient, batch_size: usize, rng: &mut StdRng) -> Result<()> { + let mut batch = Vec::with_capacity(batch_size); + for item_id in 1..=ITEMS { + batch.push(Item { + i_id: item_id, + i_im_id: rng.random_range(1..=10_000), + i_name: alpha_numeric_string(rng, 14, 24), + i_price_cents: rng.random_range(100..=10_000), + i_data: maybe_with_original(rng, 26, 50), + }); 
+ if batch.len() >= batch_size { + client.load_items(std::mem::take(&mut batch))?; + } + } + if !batch.is_empty() { + client.load_items(batch)?; + } + Ok(()) +} + +fn load_warehouses_and_districts( + client: &ModuleClient, + warehouses: u16, + batch_size: usize, + timestamp: Timestamp, + rng: &mut StdRng, +) -> Result<()> { + let mut warehouse_batch = Vec::with_capacity(batch_size); + let mut district_batch = Vec::with_capacity(batch_size); + + for w_id in 1..=warehouses { + warehouse_batch.push(Warehouse { + w_id, + w_name: alpha_string(rng, 6, 10), + w_street_1: alpha_numeric_string(rng, 10, 20), + w_street_2: alpha_numeric_string(rng, 10, 20), + w_city: alpha_string(rng, 10, 20), + w_state: alpha_string(rng, 2, 2), + w_zip: zip_code(rng), + w_tax_bps: rng.random_range(0..=2_000), + w_ytd_cents: WAREHOUSE_YTD_CENTS, + }); + + for d_id in 1..=DISTRICTS_PER_WAREHOUSE { + district_batch.push(District { + district_key: pack_district_key(w_id, d_id), + d_w_id: w_id, + d_id, + d_name: alpha_string(rng, 6, 10), + d_street_1: alpha_numeric_string(rng, 10, 20), + d_street_2: alpha_numeric_string(rng, 10, 20), + d_city: alpha_string(rng, 10, 20), + d_state: alpha_string(rng, 2, 2), + d_zip: zip_code(rng), + d_tax_bps: rng.random_range(0..=2_000), + d_ytd_cents: DISTRICT_YTD_CENTS, + d_next_o_id: CUSTOMERS_PER_DISTRICT + 1, + }); + } + } + + while !warehouse_batch.is_empty() { + let split_at = warehouse_batch.len().min(batch_size); + let remainder = warehouse_batch.split_off(split_at); + let rows = std::mem::replace(&mut warehouse_batch, remainder); + client.load_warehouses(rows)?; + } + while !district_batch.is_empty() { + let split_at = district_batch.len().min(batch_size); + let remainder = district_batch.split_off(split_at); + let rows = std::mem::replace(&mut district_batch, remainder); + client.load_districts(rows)?; + } + let _ = timestamp; + Ok(()) +} + +fn load_stock(client: &ModuleClient, warehouses: u16, batch_size: usize, rng: &mut StdRng) -> Result<()> { + let 
mut batch = Vec::with_capacity(batch_size); + for w_id in 1..=warehouses { + for item_id in 1..=ITEMS { + batch.push(Stock { + stock_key: pack_stock_key(w_id, item_id), + s_w_id: w_id, + s_i_id: item_id, + s_quantity: rng.random_range(10..=100), + s_dist_01: alpha_string(rng, 24, 24), + s_dist_02: alpha_string(rng, 24, 24), + s_dist_03: alpha_string(rng, 24, 24), + s_dist_04: alpha_string(rng, 24, 24), + s_dist_05: alpha_string(rng, 24, 24), + s_dist_06: alpha_string(rng, 24, 24), + s_dist_07: alpha_string(rng, 24, 24), + s_dist_08: alpha_string(rng, 24, 24), + s_dist_09: alpha_string(rng, 24, 24), + s_dist_10: alpha_string(rng, 24, 24), + s_ytd: 0, + s_order_cnt: 0, + s_remote_cnt: 0, + s_data: maybe_with_original(rng, 26, 50), + }); + if batch.len() >= batch_size { + client.load_stocks(std::mem::take(&mut batch))?; + } + } + } + if !batch.is_empty() { + client.load_stocks(batch)?; + } + Ok(()) +} + +fn load_customers_history_orders( + client: &ModuleClient, + warehouses: u16, + batch_size: usize, + timestamp: Timestamp, + load_c_last: u32, + rng: &mut StdRng, +) -> Result<()> { + let mut customer_batch = Vec::with_capacity(batch_size); + let mut history_batch = Vec::with_capacity(batch_size); + let mut order_batch = Vec::with_capacity(batch_size); + let mut new_order_batch = Vec::with_capacity(batch_size); + let mut order_line_batch = Vec::with_capacity(batch_size); + + for w_id in 1..=warehouses { + for d_id in 1..=DISTRICTS_PER_WAREHOUSE { + let mut permutation: Vec = (1..=CUSTOMERS_PER_DISTRICT).collect(); + permutation.shuffle(rng); + + for c_id in 1..=CUSTOMERS_PER_DISTRICT { + let credit = if rng.random_bool(0.10) { "BC" } else { "GC" }; + let last_name = if c_id <= 1_000 { + make_last_name(c_id - 1) + } else { + make_last_name(nurand(rng, 255, 0, 999, load_c_last)) + }; + customer_batch.push(Customer { + customer_key: pack_customer_key(w_id, d_id, c_id), + c_w_id: w_id, + c_d_id: d_id, + c_id, + c_first: alpha_string(rng, 8, 16), + c_middle: 
"OE".to_string(), + c_last: last_name, + c_street_1: alpha_numeric_string(rng, 10, 20), + c_street_2: alpha_numeric_string(rng, 10, 20), + c_city: alpha_string(rng, 10, 20), + c_state: alpha_string(rng, 2, 2), + c_zip: zip_code(rng), + c_phone: numeric_string(rng, 16, 16), + c_since: timestamp, + c_credit: credit.to_string(), + c_credit_lim_cents: CUSTOMER_CREDIT_LIMIT_CENTS, + c_discount_bps: rng.random_range(0..=5_000), + c_balance_cents: CUSTOMER_INITIAL_BALANCE_CENTS, + c_ytd_payment_cents: CUSTOMER_INITIAL_YTD_PAYMENT_CENTS, + c_payment_cnt: 1, + c_delivery_cnt: 0, + c_data: alpha_numeric_string(rng, 300, 500), + }); + history_batch.push(History { + history_id: 0, + h_c_id: c_id, + h_c_d_id: d_id, + h_c_w_id: w_id, + h_d_id: d_id, + h_w_id: w_id, + h_date: timestamp, + h_amount_cents: HISTORY_INITIAL_AMOUNT_CENTS, + h_data: alpha_numeric_string(rng, 12, 24), + }); + + if customer_batch.len() >= batch_size { + client.load_customers(std::mem::take(&mut customer_batch))?; + } + if history_batch.len() >= batch_size { + client.load_history(std::mem::take(&mut history_batch))?; + } + } + + for o_id in 1..=CUSTOMERS_PER_DISTRICT { + let customer_id = permutation[(o_id - 1) as usize]; + let delivered = o_id < NEW_ORDER_START; + let order_line_count = rng.random_range(5..=15) as u8; + order_batch.push(OOrder { + order_key: pack_order_key(w_id, d_id, o_id), + o_w_id: w_id, + o_d_id: d_id, + o_id, + o_c_id: customer_id, + o_entry_d: timestamp, + o_carrier_id: if delivered { + Some(rng.random_range(1..=10)) + } else { + None + }, + o_ol_cnt: order_line_count, + o_all_local: true, + }); + if !delivered { + new_order_batch.push(NewOrder { + new_order_key: pack_order_key(w_id, d_id, o_id), + no_w_id: w_id, + no_d_id: d_id, + no_o_id: o_id, + }); + } + + for ol_number in 1..=order_line_count { + order_line_batch.push(OrderLine { + order_line_key: pack_order_line_key(w_id, d_id, o_id, ol_number), + ol_w_id: w_id, + ol_d_id: d_id, + ol_o_id: o_id, + ol_number, + ol_i_id: 
rng.random_range(1..=ITEMS), + ol_supply_w_id: w_id, + ol_delivery_d: if delivered { Some(timestamp) } else { None }, + ol_quantity: 5, + ol_amount_cents: if delivered { 0 } else { rng.random_range(1..=999_999) }, + ol_dist_info: alpha_string(rng, 24, 24), + }); + if order_line_batch.len() >= batch_size { + client.load_order_lines(std::mem::take(&mut order_line_batch))?; + } + } + + if order_batch.len() >= batch_size { + client.load_orders(std::mem::take(&mut order_batch))?; + } + if new_order_batch.len() >= batch_size { + client.load_new_orders(std::mem::take(&mut new_order_batch))?; + } + } + } + } + + if !customer_batch.is_empty() { + client.load_customers(customer_batch)?; + } + if !history_batch.is_empty() { + client.load_history(history_batch)?; + } + if !order_batch.is_empty() { + client.load_orders(order_batch)?; + } + if !new_order_batch.is_empty() { + client.load_new_orders(new_order_batch)?; + } + if !order_line_batch.is_empty() { + client.load_order_lines(order_line_batch)?; + } + + Ok(()) +} diff --git a/tools/tpcc-runner/src/main.rs b/tools/tpcc-runner/src/main.rs new file mode 100644 index 00000000000..6fd29df23d4 --- /dev/null +++ b/tools/tpcc-runner/src/main.rs @@ -0,0 +1,27 @@ +mod client; +mod config; +mod coordinator; +mod driver; +mod loader; +mod module_bindings; +mod protocol; +mod summary; +mod tpcc; + +use clap::Parser; +use config::{Cli, Command, FileConfig}; +use env_logger::Env; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + env_logger::Builder::from_env(Env::default().default_filter_or("tpcc_runner=info")).init(); + + let cli = Cli::parse(); + let file_config = FileConfig::load(cli.config.as_deref())?; + + match cli.command { + Command::Load(args) => loader::run(args.resolve(&file_config)), + Command::Driver(args) => driver::run(args.resolve(&file_config)?).await, + Command::Coordinator(args) => coordinator::run(args.resolve(&file_config)?).await, + } +} diff --git 
// ===== tools/tpcc-runner/src/module_bindings/customer_selector_type.rs =====
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.

#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};

// Customer-lookup argument used by the payment / order-status procedures:
// either a direct customer id or a last-name lookup.
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub enum CustomerSelector {
    ById(u32),

    ByLastName(String),
}

impl __sdk::InModule for CustomerSelector {
    type Module = super::RemoteModule;
}

// ===== tools/tpcc-runner/src/module_bindings/customer_type.rs =====
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.

#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};

// Row type for the `Customer` table. Monetary fields are in cents,
// discount/tax fields in basis points, mirroring the module schema.
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct Customer {
    pub customer_key: u64,
    pub c_w_id: u16,
    pub c_d_id: u8,
    pub c_id: u32,
    pub c_first: String,
    pub c_middle: String,
    pub c_last: String,
    pub c_street_1: String,
    pub c_street_2: String,
    pub c_city: String,
    pub c_state: String,
    pub c_zip: String,
    pub c_phone: String,
    pub c_since: __sdk::Timestamp,
    pub c_credit: String,
    pub c_credit_lim_cents: i64,
    pub c_discount_bps: i32,
    pub c_balance_cents: i64,
    pub c_ytd_payment_cents: i64,
    pub c_payment_cnt: u32,
    pub c_delivery_cnt: u32,
    pub c_data: String,
}

impl __sdk::InModule for Customer {
    type Module = super::RemoteModule;
}

/// Column accessor struct for the table `Customer`.
///
/// Provides typed access to columns for query building.
// NOTE(review): the generic parameters on `Col` were stripped when this patch
// was extracted (likely `Col<Customer, FieldType>` per the codegen template) —
// confirm against freshly generated bindings before relying on this text.
pub struct CustomerCols {
    pub customer_key: __sdk::__query_builder::Col,
    pub c_w_id: __sdk::__query_builder::Col,
    pub c_d_id: __sdk::__query_builder::Col,
    pub c_id: __sdk::__query_builder::Col,
    pub c_first: __sdk::__query_builder::Col,
    pub c_middle: __sdk::__query_builder::Col,
    pub c_last: __sdk::__query_builder::Col,
    pub c_street_1: __sdk::__query_builder::Col,
    pub c_street_2: __sdk::__query_builder::Col,
    pub c_city: __sdk::__query_builder::Col,
    pub c_state: __sdk::__query_builder::Col,
    pub c_zip: __sdk::__query_builder::Col,
    pub c_phone: __sdk::__query_builder::Col,
    pub c_since: __sdk::__query_builder::Col,
    pub c_credit: __sdk::__query_builder::Col,
    pub c_credit_lim_cents: __sdk::__query_builder::Col,
    pub c_discount_bps: __sdk::__query_builder::Col,
    pub c_balance_cents: __sdk::__query_builder::Col,
    pub c_ytd_payment_cents: __sdk::__query_builder::Col,
    pub c_payment_cnt: __sdk::__query_builder::Col,
    pub c_delivery_cnt: __sdk::__query_builder::Col,
    pub c_data: __sdk::__query_builder::Col,
}

impl __sdk::__query_builder::HasCols for Customer {
    type Cols = CustomerCols;
    // Builds one named column accessor per field, bound to `table_name`.
    fn cols(table_name: &'static str) -> Self::Cols {
        CustomerCols {
            customer_key: __sdk::__query_builder::Col::new(table_name, "customer_key"),
            c_w_id: __sdk::__query_builder::Col::new(table_name, "c_w_id"),
            c_d_id: __sdk::__query_builder::Col::new(table_name, "c_d_id"),
            c_id: __sdk::__query_builder::Col::new(table_name, "c_id"),
            c_first: __sdk::__query_builder::Col::new(table_name, "c_first"),
            c_middle: __sdk::__query_builder::Col::new(table_name, "c_middle"),
            c_last: __sdk::__query_builder::Col::new(table_name, "c_last"),
            c_street_1: __sdk::__query_builder::Col::new(table_name, "c_street_1"),
            c_street_2: __sdk::__query_builder::Col::new(table_name, "c_street_2"),
            c_city: __sdk::__query_builder::Col::new(table_name, "c_city"),
            c_state: __sdk::__query_builder::Col::new(table_name, "c_state"),
            c_zip: __sdk::__query_builder::Col::new(table_name, "c_zip"),
            c_phone: __sdk::__query_builder::Col::new(table_name, "c_phone"),
            c_since: __sdk::__query_builder::Col::new(table_name, "c_since"),
            c_credit: __sdk::__query_builder::Col::new(table_name, "c_credit"),
            c_credit_lim_cents: __sdk::__query_builder::Col::new(table_name, "c_credit_lim_cents"),
            c_discount_bps: __sdk::__query_builder::Col::new(table_name, "c_discount_bps"),
            c_balance_cents: __sdk::__query_builder::Col::new(table_name, "c_balance_cents"),
            c_ytd_payment_cents: __sdk::__query_builder::Col::new(table_name, "c_ytd_payment_cents"),
            c_payment_cnt: __sdk::__query_builder::Col::new(table_name, "c_payment_cnt"),
            c_delivery_cnt: __sdk::__query_builder::Col::new(table_name, "c_delivery_cnt"),
            c_data: __sdk::__query_builder::Col::new(table_name, "c_data"),
        }
    }
}

/// Indexed column accessor struct for the table `Customer`.
///
/// Provides typed access to indexed columns for query building.
// NOTE(review): the generic parameters on `IxCol` were stripped in transit
// (likely `IxCol<Customer, u64>`); confirm against freshly generated bindings.
pub struct CustomerIxCols {
    pub customer_key: __sdk::__query_builder::IxCol,
}

impl __sdk::__query_builder::HasIxCols for Customer {
    type IxCols = CustomerIxCols;
    // Only the unique key column `customer_key` is indexed for lookups.
    fn ix_cols(table_name: &'static str) -> Self::IxCols {
        CustomerIxCols {
            customer_key: __sdk::__query_builder::IxCol::new(table_name, "customer_key"),
        }
    }
}

impl __sdk::__query_builder::CanBeLookupTable for Customer {}

// ===== tools/tpcc-runner/src/module_bindings/delivery_completion_type.rs =====
// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.

#![allow(unused, clippy::all)]
use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};

// Row type for the `DeliveryCompletion` table: one row per finished
// asynchronous delivery job, keyed back to the (run, driver, terminal,
// request) that queued it so drivers can reconcile their own deliveries.
#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
#[sats(crate = __lib)]
pub struct DeliveryCompletion {
    pub completion_id: u64,
    pub run_id: String,
    pub driver_id: String,
    pub terminal_id: u32,
    pub request_id: u64,
    pub warehouse_id: u16,
    pub carrier_id: u8,
    pub queued_at: __sdk::Timestamp,
    pub completed_at: __sdk::Timestamp,
    pub skipped_districts: u8,
    pub processed_districts: u8,
}

impl __sdk::InModule for DeliveryCompletion {
    type Module = super::RemoteModule;
}

/// Column accessor struct for the table `DeliveryCompletion`.
///
/// Provides typed access to columns for query building.
// NOTE(review): the generic parameters on `Col` were stripped in transit
// (likely `Col<DeliveryCompletion, FieldType>`); confirm against freshly
// generated bindings.
pub struct DeliveryCompletionCols {
    pub completion_id: __sdk::__query_builder::Col,
    pub run_id: __sdk::__query_builder::Col,
    pub driver_id: __sdk::__query_builder::Col,
    pub terminal_id: __sdk::__query_builder::Col,
    pub request_id: __sdk::__query_builder::Col,
    pub warehouse_id: __sdk::__query_builder::Col,
    pub carrier_id: __sdk::__query_builder::Col,
    pub queued_at: __sdk::__query_builder::Col,
    pub completed_at: __sdk::__query_builder::Col,
    pub skipped_districts: __sdk::__query_builder::Col,
    pub processed_districts: __sdk::__query_builder::Col,
}

impl __sdk::__query_builder::HasCols for DeliveryCompletion {
    type Cols = DeliveryCompletionCols;
    // Builds one named column accessor per field, bound to `table_name`.
    fn cols(table_name: &'static str) -> Self::Cols {
        DeliveryCompletionCols {
            completion_id: __sdk::__query_builder::Col::new(table_name, "completion_id"),
            run_id: __sdk::__query_builder::Col::new(table_name, "run_id"),
            driver_id: __sdk::__query_builder::Col::new(table_name, "driver_id"),
            terminal_id: __sdk::__query_builder::Col::new(table_name, "terminal_id"),
            request_id: __sdk::__query_builder::Col::new(table_name, "request_id"),
            warehouse_id: __sdk::__query_builder::Col::new(table_name, "warehouse_id"),
            carrier_id: __sdk::__query_builder::Col::new(table_name, "carrier_id"),
            queued_at: __sdk::__query_builder::Col::new(table_name, "queued_at"),
            completed_at: __sdk::__query_builder::Col::new(table_name, "completed_at"),
            skipped_districts: __sdk::__query_builder::Col::new(table_name, "skipped_districts"),
            processed_districts: __sdk::__query_builder::Col::new(table_name, "processed_districts"),
        }
    }
}

/// Indexed column accessor struct for the table `DeliveryCompletion`.
///
/// Provides typed access to indexed columns for query building.
+pub struct DeliveryCompletionIxCols { + pub completion_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for DeliveryCompletion { + type IxCols = DeliveryCompletionIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + DeliveryCompletionIxCols { + completion_id: __sdk::__query_builder::IxCol::new(table_name, "completion_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for DeliveryCompletion {} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_completion_view_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_completion_view_type.rs new file mode 100644 index 00000000000..97e7a7c2d60 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_completion_view_type.rs @@ -0,0 +1,25 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryCompletionView { + pub completion_id: u64, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub warehouse_id: u16, + pub carrier_id: u8, + pub queued_at: __sdk::Timestamp, + pub completed_at: __sdk::Timestamp, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +impl __sdk::InModule for DeliveryCompletionView { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_job_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_job_type.rs new file mode 100644 index 00000000000..84c44ea55cc --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_job_type.rs @@ -0,0 +1,84 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryJob { + pub scheduled_id: u64, + pub scheduled_at: __sdk::ScheduleAt, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub queued_at: __sdk::Timestamp, + pub w_id: u16, + pub carrier_id: u8, + pub next_d_id: u8, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +impl __sdk::InModule for DeliveryJob { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `DeliveryJob`. +/// +/// Provides typed access to columns for query building. +pub struct DeliveryJobCols { + pub scheduled_id: __sdk::__query_builder::Col, + pub scheduled_at: __sdk::__query_builder::Col, + pub run_id: __sdk::__query_builder::Col, + pub driver_id: __sdk::__query_builder::Col, + pub terminal_id: __sdk::__query_builder::Col, + pub request_id: __sdk::__query_builder::Col, + pub queued_at: __sdk::__query_builder::Col, + pub w_id: __sdk::__query_builder::Col, + pub carrier_id: __sdk::__query_builder::Col, + pub next_d_id: __sdk::__query_builder::Col, + pub skipped_districts: __sdk::__query_builder::Col, + pub processed_districts: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for DeliveryJob { + type Cols = DeliveryJobCols; + fn cols(table_name: &'static str) -> Self::Cols { + DeliveryJobCols { + scheduled_id: __sdk::__query_builder::Col::new(table_name, "scheduled_id"), + scheduled_at: __sdk::__query_builder::Col::new(table_name, "scheduled_at"), + run_id: __sdk::__query_builder::Col::new(table_name, "run_id"), + driver_id: __sdk::__query_builder::Col::new(table_name, "driver_id"), + terminal_id: __sdk::__query_builder::Col::new(table_name, "terminal_id"), + request_id: __sdk::__query_builder::Col::new(table_name, "request_id"), + queued_at: 
__sdk::__query_builder::Col::new(table_name, "queued_at"), + w_id: __sdk::__query_builder::Col::new(table_name, "w_id"), + carrier_id: __sdk::__query_builder::Col::new(table_name, "carrier_id"), + next_d_id: __sdk::__query_builder::Col::new(table_name, "next_d_id"), + skipped_districts: __sdk::__query_builder::Col::new(table_name, "skipped_districts"), + processed_districts: __sdk::__query_builder::Col::new(table_name, "processed_districts"), + } + } +} + +/// Indexed column accessor struct for the table `DeliveryJob`. +/// +/// Provides typed access to indexed columns for query building. +pub struct DeliveryJobIxCols { + pub run_id: __sdk::__query_builder::IxCol, + pub scheduled_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for DeliveryJob { + type IxCols = DeliveryJobIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + DeliveryJobIxCols { + run_id: __sdk::__query_builder::IxCol::new(table_name, "run_id"), + scheduled_id: __sdk::__query_builder::IxCol::new(table_name, "scheduled_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for DeliveryJob {} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs b/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs new file mode 100644 index 00000000000..697c941b658 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs @@ -0,0 +1,54 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_progress_type::DeliveryProgress; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct DeliveryProgressArgs { + pub run_id: String, +} + +impl __sdk::InModule for DeliveryProgressArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `delivery_progress`. +/// +/// Implemented for [`super::RemoteProcedures`]. +pub trait delivery_progress { + fn delivery_progress(&self, run_id: String) { + self.delivery_progress_then(run_id, |_, _| {}); + } + + fn delivery_progress_then( + &self, + run_id: String, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl delivery_progress for super::RemoteProcedures { + fn delivery_progress_then( + &self, + run_id: String, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "delivery_progress", + DeliveryProgressArgs { run_id }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_progress_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_progress_type.rs new file mode 100644 index 00000000000..f2c09493a61 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_progress_type.rs @@ -0,0 +1,17 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryProgress { + pub run_id: String, + pub pending_jobs: u64, + pub completed_jobs: u64, +} + +impl __sdk::InModule for DeliveryProgress { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_queue_ack_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_queue_ack_type.rs new file mode 100644 index 00000000000..1941feda7e0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_queue_ack_type.rs @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryQueueAck { + pub scheduled_id: u64, + pub queued_at: __sdk::Timestamp, + pub warehouse_id: u16, + pub carrier_id: u8, +} + +impl __sdk::InModule for DeliveryQueueAck { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/district_type.rs b/tools/tpcc-runner/src/module_bindings/district_type.rs new file mode 100644 index 00000000000..a686dcdf533 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/district_type.rs @@ -0,0 +1,82 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct District { + pub district_key: u32, + pub d_w_id: u16, + pub d_id: u8, + pub d_name: String, + pub d_street_1: String, + pub d_street_2: String, + pub d_city: String, + pub d_state: String, + pub d_zip: String, + pub d_tax_bps: i32, + pub d_ytd_cents: i64, + pub d_next_o_id: u32, +} + +impl __sdk::InModule for District { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `District`. +/// +/// Provides typed access to columns for query building. +pub struct DistrictCols { + pub district_key: __sdk::__query_builder::Col, + pub d_w_id: __sdk::__query_builder::Col, + pub d_id: __sdk::__query_builder::Col, + pub d_name: __sdk::__query_builder::Col, + pub d_street_1: __sdk::__query_builder::Col, + pub d_street_2: __sdk::__query_builder::Col, + pub d_city: __sdk::__query_builder::Col, + pub d_state: __sdk::__query_builder::Col, + pub d_zip: __sdk::__query_builder::Col, + pub d_tax_bps: __sdk::__query_builder::Col, + pub d_ytd_cents: __sdk::__query_builder::Col, + pub d_next_o_id: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for District { + type Cols = DistrictCols; + fn cols(table_name: &'static str) -> Self::Cols { + DistrictCols { + district_key: __sdk::__query_builder::Col::new(table_name, "district_key"), + d_w_id: __sdk::__query_builder::Col::new(table_name, "d_w_id"), + d_id: __sdk::__query_builder::Col::new(table_name, "d_id"), + d_name: __sdk::__query_builder::Col::new(table_name, "d_name"), + d_street_1: __sdk::__query_builder::Col::new(table_name, "d_street_1"), + d_street_2: __sdk::__query_builder::Col::new(table_name, "d_street_2"), + d_city: __sdk::__query_builder::Col::new(table_name, "d_city"), + d_state: __sdk::__query_builder::Col::new(table_name, "d_state"), + d_zip: 
__sdk::__query_builder::Col::new(table_name, "d_zip"), + d_tax_bps: __sdk::__query_builder::Col::new(table_name, "d_tax_bps"), + d_ytd_cents: __sdk::__query_builder::Col::new(table_name, "d_ytd_cents"), + d_next_o_id: __sdk::__query_builder::Col::new(table_name, "d_next_o_id"), + } + } +} + +/// Indexed column accessor struct for the table `District`. +/// +/// Provides typed access to indexed columns for query building. +pub struct DistrictIxCols { + pub district_key: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for District { + type IxCols = DistrictIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + DistrictIxCols { + district_key: __sdk::__query_builder::IxCol::new(table_name, "district_key"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for District {} diff --git a/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs b/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs new file mode 100644 index 00000000000..23760be1e9a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_completion_view_type::DeliveryCompletionView; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct FetchDeliveryCompletionsArgs { + pub run_id: String, + pub after_completion_id: u64, + pub limit: u32, +} + +impl __sdk::InModule for FetchDeliveryCompletionsArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `fetch_delivery_completions`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait fetch_delivery_completions { + fn fetch_delivery_completions(&self, run_id: String, after_completion_id: u64, limit: u32) { + self.fetch_delivery_completions_then(run_id, after_completion_id, limit, |_, _| {}); + } + + fn fetch_delivery_completions_then( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, String>, __sdk::InternalError>, + ) + Send + + 'static, + ); +} + +impl fetch_delivery_completions for super::RemoteProcedures { + fn fetch_delivery_completions_then( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, String>, __sdk::InternalError>, + ) + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result, String>>( + "fetch_delivery_completions", + FetchDeliveryCompletionsArgs { + run_id, + after_completion_id, + limit, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/history_type.rs b/tools/tpcc-runner/src/module_bindings/history_type.rs new file mode 100644 index 00000000000..fcfea55eb56 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/history_type.rs @@ -0,0 +1,73 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct History { + pub history_id: u64, + pub h_c_id: u32, + pub h_c_d_id: u8, + pub h_c_w_id: u16, + pub h_d_id: u8, + pub h_w_id: u16, + pub h_date: __sdk::Timestamp, + pub h_amount_cents: i64, + pub h_data: String, +} + +impl __sdk::InModule for History { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `History`. 
+/// +/// Provides typed access to columns for query building. +pub struct HistoryCols { + pub history_id: __sdk::__query_builder::Col, + pub h_c_id: __sdk::__query_builder::Col, + pub h_c_d_id: __sdk::__query_builder::Col, + pub h_c_w_id: __sdk::__query_builder::Col, + pub h_d_id: __sdk::__query_builder::Col, + pub h_w_id: __sdk::__query_builder::Col, + pub h_date: __sdk::__query_builder::Col, + pub h_amount_cents: __sdk::__query_builder::Col, + pub h_data: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for History { + type Cols = HistoryCols; + fn cols(table_name: &'static str) -> Self::Cols { + HistoryCols { + history_id: __sdk::__query_builder::Col::new(table_name, "history_id"), + h_c_id: __sdk::__query_builder::Col::new(table_name, "h_c_id"), + h_c_d_id: __sdk::__query_builder::Col::new(table_name, "h_c_d_id"), + h_c_w_id: __sdk::__query_builder::Col::new(table_name, "h_c_w_id"), + h_d_id: __sdk::__query_builder::Col::new(table_name, "h_d_id"), + h_w_id: __sdk::__query_builder::Col::new(table_name, "h_w_id"), + h_date: __sdk::__query_builder::Col::new(table_name, "h_date"), + h_amount_cents: __sdk::__query_builder::Col::new(table_name, "h_amount_cents"), + h_data: __sdk::__query_builder::Col::new(table_name, "h_data"), + } + } +} + +/// Indexed column accessor struct for the table `History`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct HistoryIxCols { + pub history_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for History { + type IxCols = HistoryIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + HistoryIxCols { + history_id: __sdk::__query_builder::IxCol::new(table_name, "history_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for History {} diff --git a/tools/tpcc-runner/src/module_bindings/item_type.rs b/tools/tpcc-runner/src/module_bindings/item_type.rs new file mode 100644 index 00000000000..d1382e5fc61 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/item_type.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct Item { + pub i_id: u32, + pub i_im_id: u32, + pub i_name: String, + pub i_price_cents: i64, + pub i_data: String, +} + +impl __sdk::InModule for Item { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `Item`. +/// +/// Provides typed access to columns for query building. 
+pub struct ItemCols { + pub i_id: __sdk::__query_builder::Col, + pub i_im_id: __sdk::__query_builder::Col, + pub i_name: __sdk::__query_builder::Col, + pub i_price_cents: __sdk::__query_builder::Col, + pub i_data: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for Item { + type Cols = ItemCols; + fn cols(table_name: &'static str) -> Self::Cols { + ItemCols { + i_id: __sdk::__query_builder::Col::new(table_name, "i_id"), + i_im_id: __sdk::__query_builder::Col::new(table_name, "i_im_id"), + i_name: __sdk::__query_builder::Col::new(table_name, "i_name"), + i_price_cents: __sdk::__query_builder::Col::new(table_name, "i_price_cents"), + i_data: __sdk::__query_builder::Col::new(table_name, "i_data"), + } + } +} + +/// Indexed column accessor struct for the table `Item`. +/// +/// Provides typed access to indexed columns for query building. +pub struct ItemIxCols { + pub i_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for Item { + type IxCols = ItemIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + ItemIxCols { + i_id: __sdk::__query_builder::IxCol::new(table_name, "i_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for Item {} diff --git a/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs new file mode 100644 index 00000000000..68000e9611b --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_type::Customer; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadCustomersArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadCustomersArgs) -> Self { + Self::LoadCustomers { rows: args.rows } + } +} + +impl __sdk::InModule for LoadCustomersArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_customers`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_customers { + /// Request that the remote module invoke the reducer `load_customers` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_customers:load_customers_then`] to run a callback after the reducer completes. + fn load_customers(&self, rows: Vec) -> __sdk::Result<()> { + self.load_customers_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_customers` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_customers_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_customers for super::RemoteReducers { + fn load_customers_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadCustomersArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs new file mode 100644 index 00000000000..4d9e6c75cd2 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::district_type::District; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadDistrictsArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadDistrictsArgs) -> Self { + Self::LoadDistricts { rows: args.rows } + } +} + +impl __sdk::InModule for LoadDistrictsArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_districts`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_districts { + /// Request that the remote module invoke the reducer `load_districts` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. 
+ /// /// Use [`load_districts:load_districts_then`] to run a callback after the reducer completes. + fn load_districts(&self, rows: Vec) -> __sdk::Result<()> { + self.load_districts_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_districts` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn load_districts_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_districts for super::RemoteReducers { + fn load_districts_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadDistrictsArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs new file mode 100644 index 00000000000..73517bccebe --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::history_type::History; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadHistoryArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadHistoryArgs) -> Self { + Self::LoadHistory { rows: args.rows } + } +} + +impl __sdk::InModule for LoadHistoryArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_history`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_history { + /// Request that the remote module invoke the reducer `load_history` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_history:load_history_then`] to run a callback after the reducer completes. + fn load_history(&self, rows: Vec) -> __sdk::Result<()> { + self.load_history_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_history` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_history_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_history for super::RemoteReducers { + fn load_history_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadHistoryArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs new file mode 100644 index 00000000000..7cc306270ae --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs @@ -0,0 +1,67 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::item_type::Item; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadItemsArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadItemsArgs) -> Self { + Self::LoadItems { rows: args.rows } + } +} + +impl __sdk::InModule for LoadItemsArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_items`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_items { + /// Request that the remote module invoke the reducer `load_items` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. 
+ /// /// Use [`load_items:load_items_then`] to run a callback after the reducer completes. + fn load_items(&self, rows: Vec) -> __sdk::Result<()> { + self.load_items_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_items` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn load_items_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_items for super::RemoteReducers { + fn load_items_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(LoadItemsArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs new file mode 100644 index 00000000000..d79b6550953 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+
+#![allow(unused, clippy::all)]
+use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
+
+use super::new_order_type::NewOrder;
+
+#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
+#[sats(crate = __lib)]
+pub(super) struct LoadNewOrdersArgs {
+    pub rows: Vec<NewOrder>,
+}
+
+impl From<LoadNewOrdersArgs> for super::Reducer {
+    fn from(args: LoadNewOrdersArgs) -> Self {
+        Self::LoadNewOrders { rows: args.rows }
+    }
+}
+
+impl __sdk::InModule for LoadNewOrdersArgs {
+    type Module = super::RemoteModule;
+}
+
+#[allow(non_camel_case_types)]
+/// Extension trait for access to the reducer `load_new_orders`.
+///
+/// Implemented for [`super::RemoteReducers`].
+pub trait load_new_orders {
+    /// Request that the remote module invoke the reducer `load_new_orders` to run as soon as possible.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and this method provides no way to listen for its completion status.
+    /// Use [`load_new_orders::load_new_orders_then`] to run a callback after the reducer completes.
+    fn load_new_orders(&self, rows: Vec<NewOrder>) -> __sdk::Result<()> {
+        self.load_new_orders_then(rows, |_, _| {})
+    }
+
+    /// Request that the remote module invoke the reducer `load_new_orders` to run as soon as possible,
+    /// registering `callback` to run when we are notified that the reducer completed.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and its status can be observed with the `callback`.
+    fn load_new_orders_then(
+        &self,
+        rows: Vec<NewOrder>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()>;
+}
+
+impl load_new_orders for super::RemoteReducers {
+    fn load_new_orders_then(
+        &self,
+        rows: Vec<NewOrder>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()> {
+        self.imp
+            .invoke_reducer_with_callback(LoadNewOrdersArgs { rows }, callback)
+    }
+}
diff --git a/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs
new file mode 100644
index 00000000000..189f862f8a5
--- /dev/null
+++ b/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs
@@ -0,0 +1,68 @@
+// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
+// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
+
+#![allow(unused, clippy::all)]
+use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
+
+use super::order_line_type::OrderLine;
+
+#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
+#[sats(crate = __lib)]
+pub(super) struct LoadOrderLinesArgs {
+    pub rows: Vec<OrderLine>,
+}
+
+impl From<LoadOrderLinesArgs> for super::Reducer {
+    fn from(args: LoadOrderLinesArgs) -> Self {
+        Self::LoadOrderLines { rows: args.rows }
+    }
+}
+
+impl __sdk::InModule for LoadOrderLinesArgs {
+    type Module = super::RemoteModule;
+}
+
+#[allow(non_camel_case_types)]
+/// Extension trait for access to the reducer `load_order_lines`.
+///
+/// Implemented for [`super::RemoteReducers`].
+pub trait load_order_lines {
+    /// Request that the remote module invoke the reducer `load_order_lines` to run as soon as possible.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and this method provides no way to listen for its completion status.
+    /// Use [`load_order_lines::load_order_lines_then`] to run a callback after the reducer completes.
+    fn load_order_lines(&self, rows: Vec<OrderLine>) -> __sdk::Result<()> {
+        self.load_order_lines_then(rows, |_, _| {})
+    }
+
+    /// Request that the remote module invoke the reducer `load_order_lines` to run as soon as possible,
+    /// registering `callback` to run when we are notified that the reducer completed.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and its status can be observed with the `callback`.
+    fn load_order_lines_then(
+        &self,
+        rows: Vec<OrderLine>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()>;
+}
+
+impl load_order_lines for super::RemoteReducers {
+    fn load_order_lines_then(
+        &self,
+        rows: Vec<OrderLine>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()> {
+        self.imp
+            .invoke_reducer_with_callback(LoadOrderLinesArgs { rows }, callback)
+    }
+}
diff --git a/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs
new file mode 100644
index 00000000000..a72bb0a9235
--- /dev/null
+++ b/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs
@@ -0,0 +1,67 @@
+// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
+// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
+
+#![allow(unused, clippy::all)]
+use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
+
+use super::o_order_type::OOrder;
+
+#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
+#[sats(crate = __lib)]
+pub(super) struct LoadOrdersArgs {
+    pub rows: Vec<OOrder>,
+}
+
+impl From<LoadOrdersArgs> for super::Reducer {
+    fn from(args: LoadOrdersArgs) -> Self {
+        Self::LoadOrders { rows: args.rows }
+    }
+}
+
+impl __sdk::InModule for LoadOrdersArgs {
+    type Module = super::RemoteModule;
+}
+
+#[allow(non_camel_case_types)]
+/// Extension trait for access to the reducer `load_orders`.
+///
+/// Implemented for [`super::RemoteReducers`].
+pub trait load_orders {
+    /// Request that the remote module invoke the reducer `load_orders` to run as soon as possible.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and this method provides no way to listen for its completion status.
+    /// Use [`load_orders::load_orders_then`] to run a callback after the reducer completes.
+    fn load_orders(&self, rows: Vec<OOrder>) -> __sdk::Result<()> {
+        self.load_orders_then(rows, |_, _| {})
+    }
+
+    /// Request that the remote module invoke the reducer `load_orders` to run as soon as possible,
+    /// registering `callback` to run when we are notified that the reducer completed.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and its status can be observed with the `callback`.
+    fn load_orders_then(
+        &self,
+        rows: Vec<OOrder>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()>;
+}
+
+impl load_orders for super::RemoteReducers {
+    fn load_orders_then(
+        &self,
+        rows: Vec<OOrder>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()> {
+        self.imp.invoke_reducer_with_callback(LoadOrdersArgs { rows }, callback)
+    }
+}
diff --git a/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs
new file mode 100644
index 00000000000..89d3f80bf7b
--- /dev/null
+++ b/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs
@@ -0,0 +1,67 @@
+// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
+// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
+
+#![allow(unused, clippy::all)]
+use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
+
+use super::stock_type::Stock;
+
+#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
+#[sats(crate = __lib)]
+pub(super) struct LoadStocksArgs {
+    pub rows: Vec<Stock>,
+}
+
+impl From<LoadStocksArgs> for super::Reducer {
+    fn from(args: LoadStocksArgs) -> Self {
+        Self::LoadStocks { rows: args.rows }
+    }
+}
+
+impl __sdk::InModule for LoadStocksArgs {
+    type Module = super::RemoteModule;
+}
+
+#[allow(non_camel_case_types)]
+/// Extension trait for access to the reducer `load_stocks`.
+///
+/// Implemented for [`super::RemoteReducers`].
+pub trait load_stocks {
+    /// Request that the remote module invoke the reducer `load_stocks` to run as soon as possible.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and this method provides no way to listen for its completion status.
+    /// Use [`load_stocks::load_stocks_then`] to run a callback after the reducer completes.
+    fn load_stocks(&self, rows: Vec<Stock>) -> __sdk::Result<()> {
+        self.load_stocks_then(rows, |_, _| {})
+    }
+
+    /// Request that the remote module invoke the reducer `load_stocks` to run as soon as possible,
+    /// registering `callback` to run when we are notified that the reducer completed.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and its status can be observed with the `callback`.
+    fn load_stocks_then(
+        &self,
+        rows: Vec<Stock>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()>;
+}
+
+impl load_stocks for super::RemoteReducers {
+    fn load_stocks_then(
+        &self,
+        rows: Vec<Stock>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()> {
+        self.imp.invoke_reducer_with_callback(LoadStocksArgs { rows }, callback)
+    }
+}
diff --git a/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs
new file mode 100644
index 00000000000..b6986a465b0
--- /dev/null
+++ b/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs
@@ -0,0 +1,68 @@
+// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
+// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
+
+#![allow(unused, clippy::all)]
+use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws};
+
+use super::warehouse_type::Warehouse;
+
+#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)]
+#[sats(crate = __lib)]
+pub(super) struct LoadWarehousesArgs {
+    pub rows: Vec<Warehouse>,
+}
+
+impl From<LoadWarehousesArgs> for super::Reducer {
+    fn from(args: LoadWarehousesArgs) -> Self {
+        Self::LoadWarehouses { rows: args.rows }
+    }
+}
+
+impl __sdk::InModule for LoadWarehousesArgs {
+    type Module = super::RemoteModule;
+}
+
+#[allow(non_camel_case_types)]
+/// Extension trait for access to the reducer `load_warehouses`.
+///
+/// Implemented for [`super::RemoteReducers`].
+pub trait load_warehouses {
+    /// Request that the remote module invoke the reducer `load_warehouses` to run as soon as possible.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and this method provides no way to listen for its completion status.
+    /// Use [`load_warehouses::load_warehouses_then`] to run a callback after the reducer completes.
+    fn load_warehouses(&self, rows: Vec<Warehouse>) -> __sdk::Result<()> {
+        self.load_warehouses_then(rows, |_, _| {})
+    }
+
+    /// Request that the remote module invoke the reducer `load_warehouses` to run as soon as possible,
+    /// registering `callback` to run when we are notified that the reducer completed.
+    ///
+    /// This method returns immediately, and errors only if we are unable to send the request.
+    /// The reducer will run asynchronously in the future,
+    /// and its status can be observed with the `callback`.
+    fn load_warehouses_then(
+        &self,
+        rows: Vec<Warehouse>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()>;
+}
+
+impl load_warehouses for super::RemoteReducers {
+    fn load_warehouses_then(
+        &self,
+        rows: Vec<Warehouse>,
+
+        callback: impl FnOnce(&super::ReducerEventContext, Result<Vec<u8>, __sdk::InternalError>)
+            + Send
+            + 'static,
+    ) -> __sdk::Result<()> {
+        self.imp
+            .invoke_reducer_with_callback(LoadWarehousesArgs { rows }, callback)
+    }
+}
diff --git a/tools/tpcc-runner/src/module_bindings/mod.rs b/tools/tpcc-runner/src/module_bindings/mod.rs
new file mode 100644
index 00000000000..4bca7a5d7da
--- /dev/null
+++ b/tools/tpcc-runner/src/module_bindings/mod.rs
@@ -0,0 +1,889 @@
+// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE
+// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD.
+
+// This was generated using spacetimedb cli version 2.1.0 (commit 36c416ff4e2b1546db51145c2bcd65070e36b416).
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +pub mod customer_selector_type; +pub mod customer_type; +pub mod delivery_completion_type; +pub mod delivery_completion_view_type; +pub mod delivery_job_type; +pub mod delivery_progress_procedure; +pub mod delivery_progress_type; +pub mod delivery_queue_ack_type; +pub mod district_type; +pub mod fetch_delivery_completions_procedure; +pub mod history_type; +pub mod item_type; +pub mod load_customers_reducer; +pub mod load_districts_reducer; +pub mod load_history_reducer; +pub mod load_items_reducer; +pub mod load_new_orders_reducer; +pub mod load_order_lines_reducer; +pub mod load_orders_reducer; +pub mod load_stocks_reducer; +pub mod load_warehouses_reducer; +pub mod new_order_line_input_type; +pub mod new_order_line_result_type; +pub mod new_order_procedure; +pub mod new_order_result_type; +pub mod new_order_type; +pub mod o_order_type; +pub mod order_line_type; +pub mod order_status_line_result_type; +pub mod order_status_procedure; +pub mod order_status_result_type; +pub mod payment_procedure; +pub mod payment_result_type; +pub mod queue_delivery_procedure; +pub mod reset_tpcc_reducer; +pub mod stock_level_procedure; +pub mod stock_level_result_type; +pub mod stock_type; +pub mod warehouse_type; + +pub use customer_selector_type::CustomerSelector; +pub use customer_type::Customer; +pub use delivery_completion_type::DeliveryCompletion; +pub use delivery_completion_view_type::DeliveryCompletionView; +pub use delivery_job_type::DeliveryJob; +pub use delivery_progress_procedure::delivery_progress; +pub use delivery_progress_type::DeliveryProgress; +pub use delivery_queue_ack_type::DeliveryQueueAck; +pub use district_type::District; +pub use fetch_delivery_completions_procedure::fetch_delivery_completions; +pub use history_type::History; +pub use item_type::Item; +pub use load_customers_reducer::load_customers; +pub use load_districts_reducer::load_districts; 
+pub use load_history_reducer::load_history; +pub use load_items_reducer::load_items; +pub use load_new_orders_reducer::load_new_orders; +pub use load_order_lines_reducer::load_order_lines; +pub use load_orders_reducer::load_orders; +pub use load_stocks_reducer::load_stocks; +pub use load_warehouses_reducer::load_warehouses; +pub use new_order_line_input_type::NewOrderLineInput; +pub use new_order_line_result_type::NewOrderLineResult; +pub use new_order_procedure::new_order; +pub use new_order_result_type::NewOrderResult; +pub use new_order_type::NewOrder; +pub use o_order_type::OOrder; +pub use order_line_type::OrderLine; +pub use order_status_line_result_type::OrderStatusLineResult; +pub use order_status_procedure::order_status; +pub use order_status_result_type::OrderStatusResult; +pub use payment_procedure::payment; +pub use payment_result_type::PaymentResult; +pub use queue_delivery_procedure::queue_delivery; +pub use reset_tpcc_reducer::reset_tpcc; +pub use stock_level_procedure::stock_level; +pub use stock_level_result_type::StockLevelResult; +pub use stock_type::Stock; +pub use warehouse_type::Warehouse; + +#[derive(Clone, PartialEq, Debug)] + +/// One of the reducers defined by this module. +/// +/// Contained within a [`__sdk::ReducerEvent`] in [`EventContext`]s for reducer events +/// to indicate which reducer caused the event. + +pub enum Reducer { + LoadCustomers { rows: Vec }, + LoadDistricts { rows: Vec }, + LoadHistory { rows: Vec }, + LoadItems { rows: Vec }, + LoadNewOrders { rows: Vec }, + LoadOrderLines { rows: Vec }, + LoadOrders { rows: Vec }, + LoadStocks { rows: Vec }, + LoadWarehouses { rows: Vec }, + ResetTpcc, +} + +impl __sdk::InModule for Reducer { + type Module = RemoteModule; +} + +impl __sdk::Reducer for Reducer { + fn reducer_name(&self) -> &'static str { + match self { + Reducer::LoadCustomers { .. } => "load_customers", + Reducer::LoadDistricts { .. } => "load_districts", + Reducer::LoadHistory { .. 
} => "load_history", + Reducer::LoadItems { .. } => "load_items", + Reducer::LoadNewOrders { .. } => "load_new_orders", + Reducer::LoadOrderLines { .. } => "load_order_lines", + Reducer::LoadOrders { .. } => "load_orders", + Reducer::LoadStocks { .. } => "load_stocks", + Reducer::LoadWarehouses { .. } => "load_warehouses", + Reducer::ResetTpcc => "reset_tpcc", + _ => unreachable!(), + } + } + #[allow(clippy::clone_on_copy)] + fn args_bsatn(&self) -> Result, __sats::bsatn::EncodeError> { + match self { + Reducer::LoadCustomers { rows } => { + __sats::bsatn::to_vec(&load_customers_reducer::LoadCustomersArgs { rows: rows.clone() }) + } + Reducer::LoadDistricts { rows } => { + __sats::bsatn::to_vec(&load_districts_reducer::LoadDistrictsArgs { rows: rows.clone() }) + } + Reducer::LoadHistory { rows } => { + __sats::bsatn::to_vec(&load_history_reducer::LoadHistoryArgs { rows: rows.clone() }) + } + Reducer::LoadItems { rows } => { + __sats::bsatn::to_vec(&load_items_reducer::LoadItemsArgs { rows: rows.clone() }) + } + Reducer::LoadNewOrders { rows } => { + __sats::bsatn::to_vec(&load_new_orders_reducer::LoadNewOrdersArgs { rows: rows.clone() }) + } + Reducer::LoadOrderLines { rows } => { + __sats::bsatn::to_vec(&load_order_lines_reducer::LoadOrderLinesArgs { rows: rows.clone() }) + } + Reducer::LoadOrders { rows } => { + __sats::bsatn::to_vec(&load_orders_reducer::LoadOrdersArgs { rows: rows.clone() }) + } + Reducer::LoadStocks { rows } => { + __sats::bsatn::to_vec(&load_stocks_reducer::LoadStocksArgs { rows: rows.clone() }) + } + Reducer::LoadWarehouses { rows } => { + __sats::bsatn::to_vec(&load_warehouses_reducer::LoadWarehousesArgs { rows: rows.clone() }) + } + Reducer::ResetTpcc => __sats::bsatn::to_vec(&reset_tpcc_reducer::ResetTpccArgs {}), + _ => unreachable!(), + } + } +} + +#[derive(Default, Debug)] +#[allow(non_snake_case)] +#[doc(hidden)] +pub struct DbUpdate {} + +impl TryFrom<__ws::v2::TransactionUpdate> for DbUpdate { + type Error = __sdk::Error; + fn 
try_from(raw: __ws::v2::TransactionUpdate) -> Result { + let mut db_update = DbUpdate::default(); + for table_update in __sdk::transaction_update_iter_table_updates(raw) { + match &table_update.table_name[..] { + unknown => { + return Err(__sdk::InternalError::unknown_name("table", unknown, "DatabaseUpdate").into()); + } + } + } + Ok(db_update) + } +} + +impl __sdk::InModule for DbUpdate { + type Module = RemoteModule; +} + +impl __sdk::DbUpdate for DbUpdate { + fn apply_to_client_cache(&self, cache: &mut __sdk::ClientCache) -> AppliedDiff<'_> { + let mut diff = AppliedDiff::default(); + + diff + } + fn parse_initial_rows(raw: __ws::v2::QueryRows) -> __sdk::Result { + let mut db_update = DbUpdate::default(); + for table_rows in raw.tables { + match &table_rows.table[..] { + unknown => { + return Err(__sdk::InternalError::unknown_name("table", unknown, "QueryRows").into()); + } + } + } + Ok(db_update) + } + fn parse_unsubscribe_rows(raw: __ws::v2::QueryRows) -> __sdk::Result { + let mut db_update = DbUpdate::default(); + for table_rows in raw.tables { + match &table_rows.table[..] { + unknown => { + return Err(__sdk::InternalError::unknown_name("table", unknown, "QueryRows").into()); + } + } + } + Ok(db_update) + } +} + +#[derive(Default)] +#[allow(non_snake_case)] +#[doc(hidden)] +pub struct AppliedDiff<'r> { + __unused: std::marker::PhantomData<&'r ()>, +} + +impl __sdk::InModule for AppliedDiff<'_> { + type Module = RemoteModule; +} + +impl<'r> __sdk::AppliedDiff<'r> for AppliedDiff<'r> { + fn invoke_row_callbacks(&self, event: &EventContext, callbacks: &mut __sdk::DbCallbacks) {} +} + +#[doc(hidden)] +#[derive(Debug)] +pub struct RemoteModule; + +impl __sdk::InModule for RemoteModule { + type Module = Self; +} + +/// The `reducers` field of [`EventContext`] and [`DbConnection`], +/// with methods provided by extension traits for each reducer defined by the module. 
+pub struct RemoteReducers { + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for RemoteReducers { + type Module = RemoteModule; +} + +/// The `procedures` field of [`DbConnection`] and other [`DbContext`] types, +/// with methods provided by extension traits for each procedure defined by the module. +pub struct RemoteProcedures { + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for RemoteProcedures { + type Module = RemoteModule; +} + +/// The `db` field of [`EventContext`] and [`DbConnection`], +/// with methods provided by extension traits for each table defined by the module. +pub struct RemoteTables { + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for RemoteTables { + type Module = RemoteModule; +} + +/// A connection to a remote module, including a materialized view of a subset of the database. +/// +/// Connect to a remote module by calling [`DbConnection::builder`] +/// and using the [`__sdk::DbConnectionBuilder`] builder-pattern constructor. +/// +/// You must explicitly advance the connection by calling any one of: +/// +/// - [`DbConnection::frame_tick`]. +#[cfg_attr(not(target_arch = "wasm32"), doc = "- [`DbConnection::run_threaded`].")] +#[cfg_attr(target_arch = "wasm32", doc = "- [`DbConnection::run_background_task`].")] +/// - [`DbConnection::run_async`]. +/// - [`DbConnection::advance_one_message`]. +#[cfg_attr( + not(target_arch = "wasm32"), + doc = "- [`DbConnection::advance_one_message_blocking`]." +)] +/// - [`DbConnection::advance_one_message_async`]. +/// +/// Which of these methods you should call depends on the specific needs of your application, +/// but you must call one of them, or else the connection will never progress. +pub struct DbConnection { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. 
+ pub reducers: RemoteReducers, + #[doc(hidden)] + + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for DbConnection { + type Module = RemoteModule; +} + +impl __sdk::DbContext for DbConnection { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl DbConnection { + /// Builder-pattern constructor for a connection to a remote module. + /// + /// See [`__sdk::DbConnectionBuilder`] for required and optional configuration for the new connection. + pub fn builder() -> __sdk::DbConnectionBuilder { + __sdk::DbConnectionBuilder::new() + } + + /// If any WebSocket messages are waiting, process one of them. + /// + /// Returns `true` if a message was processed, or `false` if the queue is empty. + /// Callers should invoke this message in a loop until it returns `false` + /// or for as much time is available to process messages. + /// + /// Returns an error if the connection is disconnected. + /// If the disconnection in question was normal, + /// i.e. 
the result of a call to [`__sdk::DbContext::disconnect`], + /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// + /// This is a low-level primitive exposed for power users who need significant control over scheduling. + /// Most applications should call [`Self::frame_tick`] each frame + /// to fully exhaust the queue whenever time is available. + pub fn advance_one_message(&self) -> __sdk::Result { + self.imp.advance_one_message() + } + + /// Process one WebSocket message, potentially blocking the current thread until one is received. + /// + /// Returns an error if the connection is disconnected. + /// If the disconnection in question was normal, + /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], + /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// + /// This is a low-level primitive exposed for power users who need significant control over scheduling. + /// Most applications should call [`Self::run_threaded`] to spawn a thread + /// which advances the connection automatically. + #[cfg(not(target_arch = "wasm32"))] + pub fn advance_one_message_blocking(&self) -> __sdk::Result<()> { + self.imp.advance_one_message_blocking() + } + + /// Process one WebSocket message, `await`ing until one is received. + /// + /// Returns an error if the connection is disconnected. + /// If the disconnection in question was normal, + /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], + /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// + /// This is a low-level primitive exposed for power users who need significant control over scheduling. + /// Most applications should call [`Self::run_async`] to run an `async` loop + /// which advances the connection when polled. 
+ pub async fn advance_one_message_async(&self) -> __sdk::Result<()> { + self.imp.advance_one_message_async().await + } + + /// Process all WebSocket messages waiting in the queue, + /// then return without `await`ing or blocking the current thread. + pub fn frame_tick(&self) -> __sdk::Result<()> { + self.imp.frame_tick() + } + + /// Spawn a thread which processes WebSocket messages as they are received. + #[cfg(not(target_arch = "wasm32"))] + pub fn run_threaded(&self) -> std::thread::JoinHandle<()> { + self.imp.run_threaded() + } + + /// Spawn a background task which processes WebSocket messages as they are received. + #[cfg(target_arch = "wasm32")] + pub fn run_background_task(&self) { + self.imp.run_background_task() + } + + /// Run an `async` loop which processes WebSocket messages when polled. + pub async fn run_async(&self) -> __sdk::Result<()> { + self.imp.run_async().await + } +} + +impl __sdk::DbConnection for DbConnection { + fn new(imp: __sdk::DbContextImpl) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + imp, + } + } +} + +/// A handle on a subscribed query. +// TODO: Document this better after implementing the new subscription API. +#[derive(Clone)] +pub struct SubscriptionHandle { + imp: __sdk::SubscriptionHandleImpl, +} + +impl __sdk::InModule for SubscriptionHandle { + type Module = RemoteModule; +} + +impl __sdk::SubscriptionHandle for SubscriptionHandle { + fn new(imp: __sdk::SubscriptionHandleImpl) -> Self { + Self { imp } + } + + /// Returns true if this subscription has been terminated due to an unsubscribe call or an error. + fn is_ended(&self) -> bool { + self.imp.is_ended() + } + + /// Returns true if this subscription has been applied and has not yet been unsubscribed. 
+ fn is_active(&self) -> bool { + self.imp.is_active() + } + + /// Unsubscribe from the query controlled by this `SubscriptionHandle`, + /// then run `on_end` when its rows are removed from the client cache. + fn unsubscribe_then(self, on_end: __sdk::OnEndedCallback) -> __sdk::Result<()> { + self.imp.unsubscribe_then(Some(on_end)) + } + + fn unsubscribe(self) -> __sdk::Result<()> { + self.imp.unsubscribe_then(None) + } +} + +/// Alias trait for a [`__sdk::DbContext`] connected to this module, +/// with that trait's associated types bounded to this module's concrete types. +/// +/// Users can use this trait as a boundary on definitions which should accept +/// either a [`DbConnection`] or an [`EventContext`] and operate on either. +pub trait RemoteDbContext: + __sdk::DbContext< + DbView = RemoteTables, + Reducers = RemoteReducers, + SubscriptionBuilder = __sdk::SubscriptionBuilder, +> +{ +} +impl< + Ctx: __sdk::DbContext< + DbView = RemoteTables, + Reducers = RemoteReducers, + SubscriptionBuilder = __sdk::SubscriptionBuilder, + >, + > RemoteDbContext for Ctx +{ +} + +/// An [`__sdk::DbContext`] augmented with a [`__sdk::Event`], +/// passed to [`__sdk::Table::on_insert`], [`__sdk::Table::on_delete`] and [`__sdk::TableWithPrimaryKey::on_update`] callbacks. +pub struct EventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + /// The event which caused these callbacks to run. 
+ pub event: __sdk::Event, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for EventContext { + type Event = __sdk::Event; + fn event(&self) -> &Self::Event { + &self.event + } + fn new(imp: __sdk::DbContextImpl, event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + event, + imp, + } + } +} + +impl __sdk::InModule for EventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for EventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::EventContext for EventContext {} + +/// An [`__sdk::DbContext`] augmented with a [`__sdk::ReducerEvent`], +/// passed to on-reducer callbacks. +pub struct ReducerEventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. 
+ pub procedures: RemoteProcedures, + /// The event which caused these callbacks to run. + pub event: __sdk::ReducerEvent, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for ReducerEventContext { + type Event = __sdk::ReducerEvent; + fn event(&self) -> &Self::Event { + &self.event + } + fn new(imp: __sdk::DbContextImpl, event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + event, + imp, + } + } +} + +impl __sdk::InModule for ReducerEventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for ReducerEventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::ReducerEventContext for ReducerEventContext {} + +/// An [`__sdk::DbContext`] passed to procedure callbacks. +pub struct ProcedureEventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. 
+ pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for ProcedureEventContext { + type Event = (); + fn event(&self) -> &Self::Event { + &() + } + fn new(imp: __sdk::DbContextImpl, _event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + imp, + } + } +} + +impl __sdk::InModule for ProcedureEventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for ProcedureEventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::ProcedureEventContext for ProcedureEventContext {} + +/// An [`__sdk::DbContext`] passed to [`__sdk::SubscriptionBuilder::on_applied`] and [`SubscriptionHandle::unsubscribe_then`] callbacks. +pub struct SubscriptionEventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. 
+ pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for SubscriptionEventContext { + type Event = (); + fn event(&self) -> &Self::Event { + &() + } + fn new(imp: __sdk::DbContextImpl, _event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + imp, + } + } +} + +impl __sdk::InModule for SubscriptionEventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for SubscriptionEventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::SubscriptionEventContext for SubscriptionEventContext {} + +/// An [`__sdk::DbContext`] augmented with a [`__sdk::Error`], +/// passed to [`__sdk::DbConnectionBuilder::on_disconnect`], [`__sdk::DbConnectionBuilder::on_connect_error`] and 
[`__sdk::SubscriptionBuilder::on_error`] callbacks. +pub struct ErrorContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + /// The event which caused these callbacks to run. + pub event: Option<__sdk::Error>, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for ErrorContext { + type Event = Option<__sdk::Error>; + fn event(&self) -> &Self::Event { + &self.event + } + fn new(imp: __sdk::DbContextImpl, event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + event, + imp, + } + } +} + +impl __sdk::InModule for ErrorContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for ErrorContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl 
__sdk::ErrorContext for ErrorContext {} + +impl __sdk::SpacetimeModule for RemoteModule { + type DbConnection = DbConnection; + type EventContext = EventContext; + type ReducerEventContext = ReducerEventContext; + type ProcedureEventContext = ProcedureEventContext; + type SubscriptionEventContext = SubscriptionEventContext; + type ErrorContext = ErrorContext; + type Reducer = Reducer; + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + type DbUpdate = DbUpdate; + type AppliedDiff<'r> = AppliedDiff<'r>; + type SubscriptionHandle = SubscriptionHandle; + type QueryBuilder = __sdk::QueryBuilder; + + fn register_tables(client_cache: &mut __sdk::ClientCache) {} + const ALL_TABLE_NAMES: &'static [&'static str] = &[]; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_line_input_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_line_input_type.rs new file mode 100644 index 00000000000..4a1ebd2ee02 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_line_input_type.rs @@ -0,0 +1,17 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrderLineInput { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, +} + +impl __sdk::InModule for NewOrderLineInput { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_line_result_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_line_result_type.rs new file mode 100644 index 00000000000..064e19222ca --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_line_result_type.rs @@ -0,0 +1,22 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. 
EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrderLineResult { + pub item_id: u32, + pub item_name: String, + pub supply_w_id: u16, + pub quantity: u32, + pub stock_quantity: i32, + pub item_price_cents: i64, + pub amount_cents: i64, + pub brand_generic: String, +} + +impl __sdk::InModule for NewOrderLineResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs b/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs new file mode 100644 index 00000000000..03bf82d7d47 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs @@ -0,0 +1,69 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_line_input_type::NewOrderLineInput; +use super::new_order_result_type::NewOrderResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct NewOrderArgs { + pub w_id: u16, + pub d_id: u8, + pub c_id: u32, + pub order_lines: Vec, +} + +impl __sdk::InModule for NewOrderArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `new_order`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait new_order { + fn new_order(&self, w_id: u16, d_id: u8, c_id: u32, order_lines: Vec) { + self.new_order_then(w_id, d_id, c_id, order_lines, |_, _| {}); + } + + fn new_order_then( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl new_order for super::RemoteProcedures { + fn new_order_then( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "new_order", + NewOrderArgs { + w_id, + d_id, + c_id, + order_lines, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_result_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_result_type.rs new file mode 100644 index 00000000000..0ffb88612f0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_result_type.rs @@ -0,0 +1,26 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_line_result_type::NewOrderLineResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrderResult { + pub warehouse_tax_bps: i32, + pub district_tax_bps: i32, + pub customer_discount_bps: i32, + pub customer_last: String, + pub customer_credit: String, + pub order_id: u32, + pub entry_d: __sdk::Timestamp, + pub total_amount_cents: i64, + pub all_local: bool, + pub lines: Vec, +} + +impl __sdk::InModule for NewOrderResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_type.rs new file mode 100644 index 00000000000..e525f4cdca2 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_type.rs @@ -0,0 +1,58 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrder { + pub new_order_key: u64, + pub no_w_id: u16, + pub no_d_id: u8, + pub no_o_id: u32, +} + +impl __sdk::InModule for NewOrder { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `NewOrder`. +/// +/// Provides typed access to columns for query building. 
+pub struct NewOrderCols { + pub new_order_key: __sdk::__query_builder::Col, + pub no_w_id: __sdk::__query_builder::Col, + pub no_d_id: __sdk::__query_builder::Col, + pub no_o_id: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for NewOrder { + type Cols = NewOrderCols; + fn cols(table_name: &'static str) -> Self::Cols { + NewOrderCols { + new_order_key: __sdk::__query_builder::Col::new(table_name, "new_order_key"), + no_w_id: __sdk::__query_builder::Col::new(table_name, "no_w_id"), + no_d_id: __sdk::__query_builder::Col::new(table_name, "no_d_id"), + no_o_id: __sdk::__query_builder::Col::new(table_name, "no_o_id"), + } + } +} + +/// Indexed column accessor struct for the table `NewOrder`. +/// +/// Provides typed access to indexed columns for query building. +pub struct NewOrderIxCols { + pub new_order_key: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for NewOrder { + type IxCols = NewOrderIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + NewOrderIxCols { + new_order_key: __sdk::__query_builder::IxCol::new(table_name, "new_order_key"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for NewOrder {} diff --git a/tools/tpcc-runner/src/module_bindings/o_order_type.rs b/tools/tpcc-runner/src/module_bindings/o_order_type.rs new file mode 100644 index 00000000000..df8662ff77b --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/o_order_type.rs @@ -0,0 +1,73 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OOrder { + pub order_key: u64, + pub o_w_id: u16, + pub o_d_id: u8, + pub o_id: u32, + pub o_c_id: u32, + pub o_entry_d: __sdk::Timestamp, + pub o_carrier_id: Option, + pub o_ol_cnt: u8, + pub o_all_local: bool, +} + +impl __sdk::InModule for OOrder { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `OOrder`. +/// +/// Provides typed access to columns for query building. +pub struct OOrderCols { + pub order_key: __sdk::__query_builder::Col, + pub o_w_id: __sdk::__query_builder::Col, + pub o_d_id: __sdk::__query_builder::Col, + pub o_id: __sdk::__query_builder::Col, + pub o_c_id: __sdk::__query_builder::Col, + pub o_entry_d: __sdk::__query_builder::Col, + pub o_carrier_id: __sdk::__query_builder::Col>, + pub o_ol_cnt: __sdk::__query_builder::Col, + pub o_all_local: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for OOrder { + type Cols = OOrderCols; + fn cols(table_name: &'static str) -> Self::Cols { + OOrderCols { + order_key: __sdk::__query_builder::Col::new(table_name, "order_key"), + o_w_id: __sdk::__query_builder::Col::new(table_name, "o_w_id"), + o_d_id: __sdk::__query_builder::Col::new(table_name, "o_d_id"), + o_id: __sdk::__query_builder::Col::new(table_name, "o_id"), + o_c_id: __sdk::__query_builder::Col::new(table_name, "o_c_id"), + o_entry_d: __sdk::__query_builder::Col::new(table_name, "o_entry_d"), + o_carrier_id: __sdk::__query_builder::Col::new(table_name, "o_carrier_id"), + o_ol_cnt: __sdk::__query_builder::Col::new(table_name, "o_ol_cnt"), + o_all_local: __sdk::__query_builder::Col::new(table_name, "o_all_local"), + } + } +} + +/// Indexed column accessor struct for the table `OOrder`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct OOrderIxCols { + pub order_key: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for OOrder { + type IxCols = OOrderIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + OOrderIxCols { + order_key: __sdk::__query_builder::IxCol::new(table_name, "order_key"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for OOrder {} diff --git a/tools/tpcc-runner/src/module_bindings/order_line_type.rs b/tools/tpcc-runner/src/module_bindings/order_line_type.rs new file mode 100644 index 00000000000..21c597e0a19 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_line_type.rs @@ -0,0 +1,79 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OrderLine { + pub order_line_key: u64, + pub ol_w_id: u16, + pub ol_d_id: u8, + pub ol_o_id: u32, + pub ol_number: u8, + pub ol_i_id: u32, + pub ol_supply_w_id: u16, + pub ol_delivery_d: Option<__sdk::Timestamp>, + pub ol_quantity: u32, + pub ol_amount_cents: i64, + pub ol_dist_info: String, +} + +impl __sdk::InModule for OrderLine { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `OrderLine`. +/// +/// Provides typed access to columns for query building. 
+pub struct OrderLineCols { + pub order_line_key: __sdk::__query_builder::Col, + pub ol_w_id: __sdk::__query_builder::Col, + pub ol_d_id: __sdk::__query_builder::Col, + pub ol_o_id: __sdk::__query_builder::Col, + pub ol_number: __sdk::__query_builder::Col, + pub ol_i_id: __sdk::__query_builder::Col, + pub ol_supply_w_id: __sdk::__query_builder::Col, + pub ol_delivery_d: __sdk::__query_builder::Col>, + pub ol_quantity: __sdk::__query_builder::Col, + pub ol_amount_cents: __sdk::__query_builder::Col, + pub ol_dist_info: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for OrderLine { + type Cols = OrderLineCols; + fn cols(table_name: &'static str) -> Self::Cols { + OrderLineCols { + order_line_key: __sdk::__query_builder::Col::new(table_name, "order_line_key"), + ol_w_id: __sdk::__query_builder::Col::new(table_name, "ol_w_id"), + ol_d_id: __sdk::__query_builder::Col::new(table_name, "ol_d_id"), + ol_o_id: __sdk::__query_builder::Col::new(table_name, "ol_o_id"), + ol_number: __sdk::__query_builder::Col::new(table_name, "ol_number"), + ol_i_id: __sdk::__query_builder::Col::new(table_name, "ol_i_id"), + ol_supply_w_id: __sdk::__query_builder::Col::new(table_name, "ol_supply_w_id"), + ol_delivery_d: __sdk::__query_builder::Col::new(table_name, "ol_delivery_d"), + ol_quantity: __sdk::__query_builder::Col::new(table_name, "ol_quantity"), + ol_amount_cents: __sdk::__query_builder::Col::new(table_name, "ol_amount_cents"), + ol_dist_info: __sdk::__query_builder::Col::new(table_name, "ol_dist_info"), + } + } +} + +/// Indexed column accessor struct for the table `OrderLine`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct OrderLineIxCols { + pub order_line_key: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for OrderLine { + type IxCols = OrderLineIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + OrderLineIxCols { + order_line_key: __sdk::__query_builder::IxCol::new(table_name, "order_line_key"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for OrderLine {} diff --git a/tools/tpcc-runner/src/module_bindings/order_status_line_result_type.rs b/tools/tpcc-runner/src/module_bindings/order_status_line_result_type.rs new file mode 100644 index 00000000000..a4bd806a60a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_status_line_result_type.rs @@ -0,0 +1,19 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OrderStatusLineResult { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, + pub amount_cents: i64, + pub delivery_d: Option<__sdk::Timestamp>, +} + +impl __sdk::InModule for OrderStatusLineResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs b/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs new file mode 100644 index 00000000000..d0e44d9c7f4 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_selector_type::CustomerSelector; +use super::order_status_result_type::OrderStatusResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct OrderStatusArgs { + pub w_id: u16, + pub d_id: u8, + pub customer: CustomerSelector, +} + +impl __sdk::InModule for OrderStatusArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `order_status`. +/// +/// Implemented for [`super::RemoteProcedures`]. +pub trait order_status { + fn order_status(&self, w_id: u16, d_id: u8, customer: CustomerSelector) { + self.order_status_then(w_id, d_id, customer, |_, _| {}); + } + + fn order_status_then( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl order_status for super::RemoteProcedures { + fn order_status_then( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "order_status", + OrderStatusArgs { w_id, d_id, customer }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/order_status_result_type.rs b/tools/tpcc-runner/src/module_bindings/order_status_result_type.rs new file mode 100644 index 00000000000..0c5a387e7a0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_status_result_type.rs @@ -0,0 +1,25 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::order_status_line_result_type::OrderStatusLineResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OrderStatusResult { + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub order_id: Option, + pub order_entry_d: Option<__sdk::Timestamp>, + pub carrier_id: Option, + pub lines: Vec, +} + +impl __sdk::InModule for OrderStatusResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/payment_procedure.rs b/tools/tpcc-runner/src/module_bindings/payment_procedure.rs new file mode 100644 index 00000000000..1557b3e07fe --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/payment_procedure.rs @@ -0,0 +1,85 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_selector_type::CustomerSelector; +use super::payment_result_type::PaymentResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct PaymentArgs { + pub w_id: u16, + pub d_id: u8, + pub c_w_id: u16, + pub c_d_id: u8, + pub customer: CustomerSelector, + pub payment_amount_cents: i64, +} + +impl __sdk::InModule for PaymentArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `payment`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait payment { + fn payment( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + ) { + self.payment_then(w_id, d_id, c_w_id, c_d_id, customer, payment_amount_cents, |_, _| {}); + } + + fn payment_then( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl payment for super::RemoteProcedures { + fn payment_then( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "payment", + PaymentArgs { + w_id, + d_id, + c_w_id, + c_d_id, + customer, + payment_amount_cents, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/payment_result_type.rs b/tools/tpcc-runner/src/module_bindings/payment_result_type.rs new file mode 100644 index 00000000000..0b33b3ff60e --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/payment_result_type.rs @@ -0,0 +1,25 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct PaymentResult { + pub warehouse_name: String, + pub district_name: String, + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub customer_credit: String, + pub customer_discount_bps: i32, + pub payment_amount_cents: i64, + pub customer_data: Option, +} + +impl __sdk::InModule for PaymentResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs b/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs new file mode 100644 index 00000000000..f42b2b1883a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs @@ -0,0 +1,84 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_queue_ack_type::DeliveryQueueAck; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct QueueDeliveryArgs { + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub w_id: u16, + pub carrier_id: u8, +} + +impl __sdk::InModule for QueueDeliveryArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `queue_delivery`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait queue_delivery { + fn queue_delivery( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + ) { + self.queue_delivery_then(run_id, driver_id, terminal_id, request_id, w_id, carrier_id, |_, _| {}); + } + + fn queue_delivery_then( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl queue_delivery for super::RemoteProcedures { + fn queue_delivery_then( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "queue_delivery", + QueueDeliveryArgs { + run_id, + driver_id, + terminal_id, + request_id, + w_id, + carrier_id, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs b/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs new file mode 100644 index 00000000000..da9424c94c4 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct ResetTpccArgs {} + +impl From for super::Reducer { + fn from(args: ResetTpccArgs) -> Self { + Self::ResetTpcc + } +} + +impl __sdk::InModule for ResetTpccArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `reset_tpcc`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait reset_tpcc { + /// Request that the remote module invoke the reducer `reset_tpcc` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`reset_tpcc:reset_tpcc_then`] to run a callback after the reducer completes. + fn reset_tpcc(&self) -> __sdk::Result<()> { + self.reset_tpcc_then(|_, _| {}) + } + + /// Request that the remote module invoke the reducer `reset_tpcc` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn reset_tpcc_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl reset_tpcc for super::RemoteReducers { + fn reset_tpcc_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(ResetTpccArgs {}, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs b/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs new file mode 100644 index 00000000000..cecefcbbf0e --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs @@ -0,0 +1,60 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::stock_level_result_type::StockLevelResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct StockLevelArgs { + pub w_id: u16, + pub d_id: u8, + pub threshold: i32, +} + +impl __sdk::InModule for StockLevelArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `stock_level`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait stock_level { + fn stock_level(&self, w_id: u16, d_id: u8, threshold: i32) { + self.stock_level_then(w_id, d_id, threshold, |_, _| {}); + } + + fn stock_level_then( + &self, + w_id: u16, + d_id: u8, + threshold: i32, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl stock_level for super::RemoteProcedures { + fn stock_level_then( + &self, + w_id: u16, + d_id: u8, + threshold: i32, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "stock_level", + StockLevelArgs { w_id, d_id, threshold }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/stock_level_result_type.rs b/tools/tpcc-runner/src/module_bindings/stock_level_result_type.rs new file mode 100644 index 00000000000..426e4853363 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/stock_level_result_type.rs @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct StockLevelResult { + pub warehouse_id: u16, + pub district_id: u8, + pub threshold: i32, + pub low_stock_count: u32, +} + +impl __sdk::InModule for StockLevelResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/stock_type.rs b/tools/tpcc-runner/src/module_bindings/stock_type.rs new file mode 100644 index 00000000000..dedd442f83a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/stock_type.rs @@ -0,0 +1,100 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. 
MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct Stock { + pub stock_key: u64, + pub s_w_id: u16, + pub s_i_id: u32, + pub s_quantity: i32, + pub s_dist_01: String, + pub s_dist_02: String, + pub s_dist_03: String, + pub s_dist_04: String, + pub s_dist_05: String, + pub s_dist_06: String, + pub s_dist_07: String, + pub s_dist_08: String, + pub s_dist_09: String, + pub s_dist_10: String, + pub s_ytd: u64, + pub s_order_cnt: u32, + pub s_remote_cnt: u32, + pub s_data: String, +} + +impl __sdk::InModule for Stock { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `Stock`. +/// +/// Provides typed access to columns for query building. +pub struct StockCols { + pub stock_key: __sdk::__query_builder::Col, + pub s_w_id: __sdk::__query_builder::Col, + pub s_i_id: __sdk::__query_builder::Col, + pub s_quantity: __sdk::__query_builder::Col, + pub s_dist_01: __sdk::__query_builder::Col, + pub s_dist_02: __sdk::__query_builder::Col, + pub s_dist_03: __sdk::__query_builder::Col, + pub s_dist_04: __sdk::__query_builder::Col, + pub s_dist_05: __sdk::__query_builder::Col, + pub s_dist_06: __sdk::__query_builder::Col, + pub s_dist_07: __sdk::__query_builder::Col, + pub s_dist_08: __sdk::__query_builder::Col, + pub s_dist_09: __sdk::__query_builder::Col, + pub s_dist_10: __sdk::__query_builder::Col, + pub s_ytd: __sdk::__query_builder::Col, + pub s_order_cnt: __sdk::__query_builder::Col, + pub s_remote_cnt: __sdk::__query_builder::Col, + pub s_data: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for Stock { + type Cols = StockCols; + fn cols(table_name: &'static str) -> Self::Cols { + StockCols { + stock_key: __sdk::__query_builder::Col::new(table_name, "stock_key"), + s_w_id: 
__sdk::__query_builder::Col::new(table_name, "s_w_id"), + s_i_id: __sdk::__query_builder::Col::new(table_name, "s_i_id"), + s_quantity: __sdk::__query_builder::Col::new(table_name, "s_quantity"), + s_dist_01: __sdk::__query_builder::Col::new(table_name, "s_dist_01"), + s_dist_02: __sdk::__query_builder::Col::new(table_name, "s_dist_02"), + s_dist_03: __sdk::__query_builder::Col::new(table_name, "s_dist_03"), + s_dist_04: __sdk::__query_builder::Col::new(table_name, "s_dist_04"), + s_dist_05: __sdk::__query_builder::Col::new(table_name, "s_dist_05"), + s_dist_06: __sdk::__query_builder::Col::new(table_name, "s_dist_06"), + s_dist_07: __sdk::__query_builder::Col::new(table_name, "s_dist_07"), + s_dist_08: __sdk::__query_builder::Col::new(table_name, "s_dist_08"), + s_dist_09: __sdk::__query_builder::Col::new(table_name, "s_dist_09"), + s_dist_10: __sdk::__query_builder::Col::new(table_name, "s_dist_10"), + s_ytd: __sdk::__query_builder::Col::new(table_name, "s_ytd"), + s_order_cnt: __sdk::__query_builder::Col::new(table_name, "s_order_cnt"), + s_remote_cnt: __sdk::__query_builder::Col::new(table_name, "s_remote_cnt"), + s_data: __sdk::__query_builder::Col::new(table_name, "s_data"), + } + } +} + +/// Indexed column accessor struct for the table `Stock`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct StockIxCols { + pub stock_key: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for Stock { + type IxCols = StockIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + StockIxCols { + stock_key: __sdk::__query_builder::IxCol::new(table_name, "stock_key"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for Stock {} diff --git a/tools/tpcc-runner/src/module_bindings/warehouse_type.rs b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs new file mode 100644 index 00000000000..66f02971a4f --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs @@ -0,0 +1,73 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct Warehouse { + pub w_id: u16, + pub w_name: String, + pub w_street_1: String, + pub w_street_2: String, + pub w_city: String, + pub w_state: String, + pub w_zip: String, + pub w_tax_bps: i32, + pub w_ytd_cents: i64, +} + +impl __sdk::InModule for Warehouse { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `Warehouse`. +/// +/// Provides typed access to columns for query building. 
+pub struct WarehouseCols { + pub w_id: __sdk::__query_builder::Col, + pub w_name: __sdk::__query_builder::Col, + pub w_street_1: __sdk::__query_builder::Col, + pub w_street_2: __sdk::__query_builder::Col, + pub w_city: __sdk::__query_builder::Col, + pub w_state: __sdk::__query_builder::Col, + pub w_zip: __sdk::__query_builder::Col, + pub w_tax_bps: __sdk::__query_builder::Col, + pub w_ytd_cents: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for Warehouse { + type Cols = WarehouseCols; + fn cols(table_name: &'static str) -> Self::Cols { + WarehouseCols { + w_id: __sdk::__query_builder::Col::new(table_name, "w_id"), + w_name: __sdk::__query_builder::Col::new(table_name, "w_name"), + w_street_1: __sdk::__query_builder::Col::new(table_name, "w_street_1"), + w_street_2: __sdk::__query_builder::Col::new(table_name, "w_street_2"), + w_city: __sdk::__query_builder::Col::new(table_name, "w_city"), + w_state: __sdk::__query_builder::Col::new(table_name, "w_state"), + w_zip: __sdk::__query_builder::Col::new(table_name, "w_zip"), + w_tax_bps: __sdk::__query_builder::Col::new(table_name, "w_tax_bps"), + w_ytd_cents: __sdk::__query_builder::Col::new(table_name, "w_ytd_cents"), + } + } +} + +/// Indexed column accessor struct for the table `Warehouse`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct WarehouseIxCols { + pub w_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for Warehouse { + type IxCols = WarehouseIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + WarehouseIxCols { + w_id: __sdk::__query_builder::IxCol::new(table_name, "w_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for Warehouse {} diff --git a/tools/tpcc-runner/src/protocol.rs b/tools/tpcc-runner/src/protocol.rs new file mode 100644 index 00000000000..5d76fb94ea1 --- /dev/null +++ b/tools/tpcc-runner/src/protocol.rs @@ -0,0 +1,36 @@ +use serde::{Deserialize, Serialize}; + +use crate::summary::DriverSummary; + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct RegisterDriverRequest { + pub driver_id: String, + pub terminal_start: u32, + pub terminals: u32, + pub warehouse_count: u16, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct RegisterDriverResponse { + pub accepted: bool, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct RunSchedule { + pub run_id: String, + pub warmup_start_ms: u64, + pub measure_start_ms: u64, + pub measure_end_ms: u64, + pub stop_ms: u64, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ScheduleResponse { + pub ready: bool, + pub schedule: Option, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct SubmitSummaryRequest { + pub summary: DriverSummary, +} diff --git a/tools/tpcc-runner/src/summary.rs b/tools/tpcc-runner/src/summary.rs new file mode 100644 index 00000000000..6cacce6cf76 --- /dev/null +++ b/tools/tpcc-runner/src/summary.rs @@ -0,0 +1,585 @@ +use anyhow::{Context, Result}; +use parking_lot::Mutex; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::fs::File; +use std::io::{BufWriter, Write}; +use std::path::Path; +use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::module_bindings::DeliveryCompletionView; + +const HISTOGRAM_BUCKETS_MS: [u64; 16] = [ + 
1, 2, 5, 10, 20, 50, 100, 200, 500, 1_000, 2_000, 5_000, 10_000, 20_000, 60_000, 120_000, +]; + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum TransactionKind { + NewOrder, + Payment, + OrderStatus, + Delivery, + StockLevel, +} + +impl TransactionKind { + pub const ALL: [Self; 5] = [ + Self::NewOrder, + Self::Payment, + Self::OrderStatus, + Self::Delivery, + Self::StockLevel, + ]; + + pub fn as_str(self) -> &'static str { + match self { + Self::NewOrder => "new_order", + Self::Payment => "payment", + Self::OrderStatus => "order_status", + Self::Delivery => "delivery", + Self::StockLevel => "stock_level", + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Histogram { + pub buckets_ms: Vec, + pub counts: Vec, + pub count: u64, + pub sum_ms: u64, + pub max_ms: u64, +} + +impl Default for Histogram { + fn default() -> Self { + Self { + buckets_ms: HISTOGRAM_BUCKETS_MS.to_vec(), + counts: vec![0; HISTOGRAM_BUCKETS_MS.len() + 1], + count: 0, + sum_ms: 0, + max_ms: 0, + } + } +} + +impl Histogram { + pub fn record(&mut self, value_ms: u64) { + let index = self + .buckets_ms + .iter() + .position(|upper| value_ms <= *upper) + .unwrap_or(self.buckets_ms.len()); + self.counts[index] += 1; + self.count += 1; + self.sum_ms += value_ms; + self.max_ms = self.max_ms.max(value_ms); + } + + pub fn merge(&mut self, other: &Histogram) { + self.count += other.count; + self.sum_ms += other.sum_ms; + self.max_ms = self.max_ms.max(other.max_ms); + for (left, right) in self.counts.iter_mut().zip(&other.counts) { + *left += right; + } + } + + pub fn mean_ms(&self) -> f64 { + if self.count == 0 { + 0.0 + } else { + self.sum_ms as f64 / self.count as f64 + } + } + + pub fn percentile_ms(&self, pct: f64) -> u64 { + if self.count == 0 { + return 0; + } + let wanted = ((self.count as f64) * pct).ceil() as u64; + let mut seen = 0u64; + for (idx, count) in self.counts.iter().enumerate() { + seen += *count; + if seen >= wanted { + return if idx < 
self.buckets_ms.len() { + self.buckets_ms[idx] + } else { + self.max_ms + }; + } + } + self.max_ms + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct TransactionSummary { + pub count: u64, + pub success: u64, + pub failure: u64, + pub mean_latency_ms: f64, + pub p50_latency_ms: u64, + pub p95_latency_ms: u64, + pub p99_latency_ms: u64, + pub max_latency_ms: u64, + pub histogram: Histogram, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct ConformanceSummary { + pub new_order_rollbacks: u64, + pub new_order_total: u64, + pub new_order_remote_order_lines: u64, + pub new_order_total_order_lines: u64, + pub payment_remote: u64, + pub payment_total: u64, + pub payment_by_last_name: u64, + pub order_status_by_last_name: u64, + pub order_status_total: u64, + pub delivery_queued: u64, + pub delivery_completed: u64, + pub delivery_processed_districts: u64, + pub delivery_skipped_districts: u64, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct DeliverySummary { + pub queued: u64, + pub completed: u64, + pub pending: u64, + pub processed_districts: u64, + pub skipped_districts: u64, + pub completion_mean_ms: f64, + pub completion_p50_ms: u64, + pub completion_p95_ms: u64, + pub completion_p99_ms: u64, + pub completion_max_ms: u64, + pub completion_histogram: Histogram, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct DriverSummary { + pub run_id: String, + pub driver_id: String, + pub uri: String, + pub database: String, + pub terminal_start: u32, + pub terminals: u32, + pub warehouse_count: u16, + pub warmup_secs: u64, + pub measure_secs: u64, + pub measure_start_ms: u64, + pub measure_end_ms: u64, + pub generated_at_ms: u64, + pub total_transactions: u64, + pub tpmc_like: f64, + pub transaction_mix: BTreeMap, + pub conformance: ConformanceSummary, + pub transactions: BTreeMap, + pub delivery: DeliverySummary, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct AggregateSummary { + 
pub run_id: String, + pub driver_count: usize, + pub drivers: Vec, + pub generated_at_ms: u64, + pub total_transactions: u64, + pub tpmc_like: f64, + pub transaction_mix: BTreeMap, + pub conformance: ConformanceSummary, + pub transactions: BTreeMap, + pub delivery: DeliverySummary, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +struct EventLine { + timestamp_ms: u64, + run_id: String, + driver_id: String, + terminal_id: u32, + transaction: String, + success: bool, + latency_ms: u64, + rollback: bool, + remote: bool, + by_last_name: bool, + order_line_count: u32, + remote_order_line_count: u32, + detail: Option, +} + +#[derive(Clone, Debug)] +pub struct TransactionRecord { + pub timestamp_ms: u64, + pub terminal_id: u32, + pub kind: TransactionKind, + pub success: bool, + pub latency_ms: u64, + pub rollback: bool, + pub remote: bool, + pub by_last_name: bool, + pub order_line_count: u32, + pub remote_order_line_count: u32, + pub detail: Option, +} + +#[derive(Default)] +struct TransactionAccumulator { + success: u64, + failure: u64, + histogram: Histogram, +} + +impl TransactionAccumulator { + fn record(&mut self, success: bool, latency_ms: u64) { + if success { + self.success += 1; + } else { + self.failure += 1; + } + self.histogram.record(latency_ms); + } + + fn to_summary(&self) -> TransactionSummary { + TransactionSummary { + count: self.success + self.failure, + success: self.success, + failure: self.failure, + mean_latency_ms: self.histogram.mean_ms(), + p50_latency_ms: self.histogram.percentile_ms(0.50), + p95_latency_ms: self.histogram.percentile_ms(0.95), + p99_latency_ms: self.histogram.percentile_ms(0.99), + max_latency_ms: self.histogram.max_ms, + histogram: self.histogram.clone(), + } + } +} + +pub struct MetricsCollector { + run_id: String, + driver_id: String, + writer: BufWriter, + by_kind: BTreeMap<&'static str, TransactionAccumulator>, + conformance: ConformanceSummary, + delivery_completion_histogram: Histogram, +} + +#[derive(Clone)] 
+pub struct SharedMetrics { + inner: Arc>, +} + +#[derive(Clone, Debug)] +pub struct DriverSummaryMeta { + pub run_id: String, + pub driver_id: String, + pub uri: String, + pub database: String, + pub terminal_start: u32, + pub terminals: u32, + pub warehouse_count: u16, + pub warmup_secs: u64, + pub measure_secs: u64, + pub measure_start_ms: u64, + pub measure_end_ms: u64, +} + +impl SharedMetrics { + pub fn create(run_id: &str, driver_id: &str, path: &Path) -> Result { + let file = File::create(path).with_context(|| format!("failed to create {}", path.display()))?; + let collector = MetricsCollector { + run_id: run_id.to_string(), + driver_id: driver_id.to_string(), + writer: BufWriter::new(file), + by_kind: TransactionKind::ALL + .into_iter() + .map(|kind| (kind.as_str(), TransactionAccumulator::default())) + .collect(), + conformance: ConformanceSummary::default(), + delivery_completion_histogram: Histogram::default(), + }; + Ok(Self { + inner: Arc::new(Mutex::new(collector)), + }) + } + + pub fn record(&self, event: TransactionRecord) -> Result<()> { + let mut collector = self.inner.lock(); + collector.record(event) + } + + pub fn record_delivery_completion(&self, completion: &DeliveryCompletionView) { + let mut collector = self.inner.lock(); + collector.record_delivery_completion(completion); + } + + pub fn delivery_queued(&self) -> u64 { + self.inner.lock().conformance.delivery_queued + } + + pub fn finalize(self, meta: DriverSummaryMeta) -> Result { + self.inner.lock().finalize(meta) + } +} + +impl MetricsCollector { + fn record(&mut self, event: TransactionRecord) -> Result<()> { + let line = EventLine { + timestamp_ms: event.timestamp_ms, + run_id: self.run_id.clone(), + driver_id: self.driver_id.clone(), + terminal_id: event.terminal_id, + transaction: event.kind.as_str().to_string(), + success: event.success, + latency_ms: event.latency_ms, + rollback: event.rollback, + remote: event.remote, + by_last_name: event.by_last_name, + order_line_count: 
event.order_line_count, + remote_order_line_count: event.remote_order_line_count, + detail: event.detail.clone(), + }; + serde_json::to_writer(&mut self.writer, &line)?; + self.writer.write_all(b"\n")?; + + let accumulator = self + .by_kind + .get_mut(event.kind.as_str()) + .expect("all transaction kinds registered"); + accumulator.record(event.success, event.latency_ms); + + match event.kind { + TransactionKind::NewOrder => { + self.conformance.new_order_total += 1; + self.conformance.new_order_total_order_lines += u64::from(event.order_line_count); + self.conformance.new_order_remote_order_lines += u64::from(event.remote_order_line_count); + if event.rollback { + self.conformance.new_order_rollbacks += 1; + } + } + TransactionKind::Payment => { + self.conformance.payment_total += 1; + if event.remote { + self.conformance.payment_remote += 1; + } + if event.by_last_name { + self.conformance.payment_by_last_name += 1; + } + } + TransactionKind::OrderStatus => { + self.conformance.order_status_total += 1; + if event.by_last_name { + self.conformance.order_status_by_last_name += 1; + } + } + TransactionKind::Delivery => { + self.conformance.delivery_queued += 1; + } + TransactionKind::StockLevel => {} + } + Ok(()) + } + + fn record_delivery_completion(&mut self, completion: &DeliveryCompletionView) { + self.conformance.delivery_completed += 1; + self.conformance.delivery_processed_districts += u64::from(completion.processed_districts); + self.conformance.delivery_skipped_districts += u64::from(completion.skipped_districts); + let lag_ms = completion + .completed_at + .to_micros_since_unix_epoch() + .saturating_sub(completion.queued_at.to_micros_since_unix_epoch()) + .max(0) as u64 + / 1_000; + self.delivery_completion_histogram.record(lag_ms); + } + + fn finalize(&mut self, meta: DriverSummaryMeta) -> Result { + self.writer.flush()?; + + let mut transactions = BTreeMap::new(); + let mut total_transactions = 0u64; + for kind in TransactionKind::ALL { + let summary = 
self + .by_kind + .get(kind.as_str()) + .expect("transaction kind exists") + .to_summary(); + total_transactions += summary.count; + transactions.insert(kind.as_str().to_string(), summary); + } + + let mut mix = BTreeMap::new(); + for kind in TransactionKind::ALL { + let count = transactions + .get(kind.as_str()) + .map(|summary| summary.count) + .unwrap_or(0); + let ratio = if total_transactions == 0 { + 0.0 + } else { + (count as f64) * 100.0 / (total_transactions as f64) + }; + mix.insert(kind.as_str().to_string(), ratio); + } + + let measure_minutes = if meta.measure_secs == 0 { + 0.0 + } else { + meta.measure_secs as f64 / 60.0 + }; + let new_order_success = transactions + .get(TransactionKind::NewOrder.as_str()) + .map(|summary| summary.success) + .unwrap_or(0); + let tpmc_like = if measure_minutes == 0.0 { + 0.0 + } else { + new_order_success as f64 / measure_minutes + }; + + let delivery_completed = self.conformance.delivery_completed; + let delivery_queued = self.conformance.delivery_queued; + let delivery = DeliverySummary { + queued: delivery_queued, + completed: delivery_completed, + pending: delivery_queued.saturating_sub(delivery_completed), + processed_districts: self.conformance.delivery_processed_districts, + skipped_districts: self.conformance.delivery_skipped_districts, + completion_mean_ms: self.delivery_completion_histogram.mean_ms(), + completion_p50_ms: self.delivery_completion_histogram.percentile_ms(0.50), + completion_p95_ms: self.delivery_completion_histogram.percentile_ms(0.95), + completion_p99_ms: self.delivery_completion_histogram.percentile_ms(0.99), + completion_max_ms: self.delivery_completion_histogram.max_ms, + completion_histogram: self.delivery_completion_histogram.clone(), + }; + + Ok(DriverSummary { + run_id: meta.run_id, + driver_id: meta.driver_id, + uri: meta.uri, + database: meta.database, + terminal_start: meta.terminal_start, + terminals: meta.terminals, + warehouse_count: meta.warehouse_count, + warmup_secs: 
meta.warmup_secs, + measure_secs: meta.measure_secs, + measure_start_ms: meta.measure_start_ms, + measure_end_ms: meta.measure_end_ms, + generated_at_ms: now_millis(), + total_transactions, + tpmc_like, + transaction_mix: mix, + conformance: self.conformance.clone(), + transactions, + delivery, + }) + } +} + +pub fn aggregate_summaries(run_id: String, summaries: &[DriverSummary]) -> AggregateSummary { + let mut by_kind: BTreeMap = TransactionKind::ALL + .into_iter() + .map(|kind| (kind.as_str().to_string(), TransactionAccumulator::default())) + .collect(); + let mut total_transactions = 0u64; + let mut conformance = ConformanceSummary::default(); + let mut delivery_histogram = Histogram::default(); + let mut driver_names = Vec::with_capacity(summaries.len()); + + for summary in summaries { + driver_names.push(summary.driver_id.clone()); + total_transactions += summary.total_transactions; + conformance.new_order_rollbacks += summary.conformance.new_order_rollbacks; + conformance.new_order_total += summary.conformance.new_order_total; + conformance.new_order_remote_order_lines += summary.conformance.new_order_remote_order_lines; + conformance.new_order_total_order_lines += summary.conformance.new_order_total_order_lines; + conformance.payment_remote += summary.conformance.payment_remote; + conformance.payment_total += summary.conformance.payment_total; + conformance.payment_by_last_name += summary.conformance.payment_by_last_name; + conformance.order_status_by_last_name += summary.conformance.order_status_by_last_name; + conformance.order_status_total += summary.conformance.order_status_total; + conformance.delivery_queued += summary.conformance.delivery_queued; + conformance.delivery_completed += summary.conformance.delivery_completed; + conformance.delivery_processed_districts += summary.conformance.delivery_processed_districts; + conformance.delivery_skipped_districts += summary.conformance.delivery_skipped_districts; + 
delivery_histogram.merge(&summary.delivery.completion_histogram); + + for (name, txn) in &summary.transactions { + let acc = by_kind.get_mut(name).expect("kind exists"); + acc.success += txn.success; + acc.failure += txn.failure; + acc.histogram.merge(&txn.histogram); + } + } + + let mut transactions = BTreeMap::new(); + let mut mix = BTreeMap::new(); + for (name, acc) in by_kind { + let summary = acc.to_summary(); + let ratio = if total_transactions == 0 { + 0.0 + } else { + (summary.count as f64) * 100.0 / (total_transactions as f64) + }; + mix.insert(name.clone(), ratio); + transactions.insert(name, summary); + } + + let measure_secs = summaries.first().map(|summary| summary.measure_secs).unwrap_or(0); + let measure_minutes = if measure_secs == 0 { + 0.0 + } else { + measure_secs as f64 / 60.0 + }; + let tpmc_like = if measure_minutes == 0.0 { + 0.0 + } else { + transactions + .get(TransactionKind::NewOrder.as_str()) + .map(|summary| summary.success as f64 / measure_minutes) + .unwrap_or(0.0) + }; + + AggregateSummary { + run_id, + driver_count: summaries.len(), + drivers: driver_names, + generated_at_ms: now_millis(), + total_transactions, + tpmc_like, + transaction_mix: mix, + conformance: conformance.clone(), + transactions, + delivery: DeliverySummary { + queued: conformance.delivery_queued, + completed: conformance.delivery_completed, + pending: conformance + .delivery_queued + .saturating_sub(conformance.delivery_completed), + processed_districts: conformance.delivery_processed_districts, + skipped_districts: conformance.delivery_skipped_districts, + completion_mean_ms: delivery_histogram.mean_ms(), + completion_p50_ms: delivery_histogram.percentile_ms(0.50), + completion_p95_ms: delivery_histogram.percentile_ms(0.95), + completion_p99_ms: delivery_histogram.percentile_ms(0.99), + completion_max_ms: delivery_histogram.max_ms, + completion_histogram: delivery_histogram, + }, + } +} + +pub fn write_json(path: &Path, value: &T) -> Result<()> { + let file = 
File::create(path).with_context(|| format!("failed to create {}", path.display()))?; + serde_json::to_writer_pretty(file, value).with_context(|| format!("failed to write {}", path.display())) +} + +pub fn now_millis() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("system clock before unix epoch") + .as_millis() as u64 +} diff --git a/tools/tpcc-runner/src/tpcc.rs b/tools/tpcc-runner/src/tpcc.rs new file mode 100644 index 00000000000..3e4ba0f587e --- /dev/null +++ b/tools/tpcc-runner/src/tpcc.rs @@ -0,0 +1,192 @@ +use rand::Rng; +use std::thread; +use std::time::Duration; + +use crate::summary::TransactionKind; + +pub const DISTRICTS_PER_WAREHOUSE: u8 = 10; +pub const CUSTOMERS_PER_DISTRICT: u32 = 3_000; +pub const ITEMS: u32 = 100_000; +pub const NEW_ORDER_START: u32 = 2_101; + +const LAST_NAME_PARTS: [&str; 10] = [ + "BAR", "OUGHT", "ABLE", "PRI", "PRES", "ESE", "ANTI", "CALLY", "ATION", "EING", +]; + +#[derive(Clone, Debug)] +pub struct RunConstants { + pub c_last: u32, + pub c_id: u32, + pub order_line_item: u32, +} + +#[derive(Clone, Copy, Debug)] +pub struct TerminalAssignment { + pub terminal_id: u32, + pub warehouse_id: u16, + pub district_id: u8, +} + +pub fn assign_terminal(terminal_id: u32, warehouse_count: u16) -> Option { + let zero = terminal_id.checked_sub(1)?; + let warehouse_zero = zero / u32::from(DISTRICTS_PER_WAREHOUSE); + if warehouse_zero >= u32::from(warehouse_count) { + return None; + } + let district_zero = zero % u32::from(DISTRICTS_PER_WAREHOUSE); + Some(TerminalAssignment { + terminal_id, + warehouse_id: (warehouse_zero + 1) as u16, + district_id: (district_zero + 1) as u8, + }) +} + +pub fn choose_transaction(rng: &mut R) -> TransactionKind { + let roll = rng.random_range(1..=100); + match roll { + 1..=45 => TransactionKind::NewOrder, + 46..=88 => TransactionKind::Payment, + 89..=92 => TransactionKind::OrderStatus, + 93..=96 => TransactionKind::Delivery, + _ => TransactionKind::StockLevel, + } +} + +pub fn 
generate_run_constants(rng: &mut R) -> RunConstants { + RunConstants { + c_last: rng.random_range(0..=255), + c_id: rng.random_range(0..=1_023), + order_line_item: rng.random_range(0..=8_191), + } +} + +pub fn nurand(rng: &mut R, a: u32, x: u32, y: u32, c: u32) -> u32 { + (((rng.random_range(0..=a) | rng.random_range(x..=y)) + c) % (y - x + 1)) + x +} + +pub fn customer_id(rng: &mut R, constants: &RunConstants) -> u32 { + nurand(rng, 1_023, 1, CUSTOMERS_PER_DISTRICT, constants.c_id) +} + +pub fn item_id(rng: &mut R, constants: &RunConstants) -> u32 { + nurand(rng, 8_191, 1, ITEMS, constants.order_line_item) +} + +pub fn customer_last_name(rng: &mut R, constants: &RunConstants) -> String { + make_last_name(nurand(rng, 255, 0, 999, constants.c_last)) +} + +pub fn make_last_name(num: u32) -> String { + let hundreds = ((num / 100) % 10) as usize; + let tens = ((num / 10) % 10) as usize; + let ones = (num % 10) as usize; + format!( + "{}{}{}", + LAST_NAME_PARTS[hundreds], LAST_NAME_PARTS[tens], LAST_NAME_PARTS[ones] + ) +} + +pub fn alpha_string(rng: &mut R, min_len: usize, max_len: usize) -> String { + let len = rng.random_range(min_len..=max_len); + (0..len).map(|_| (b'A' + rng.random_range(0..26)) as char).collect() +} + +pub fn numeric_string(rng: &mut R, min_len: usize, max_len: usize) -> String { + let len = rng.random_range(min_len..=max_len); + (0..len).map(|_| (b'0' + rng.random_range(0..10)) as char).collect() +} + +pub fn alpha_numeric_string(rng: &mut R, min_len: usize, max_len: usize) -> String { + let len = rng.random_range(min_len..=max_len); + (0..len) + .map(|_| { + if rng.random_bool(0.5) { + (b'A' + rng.random_range(0..26)) as char + } else { + (b'0' + rng.random_range(0..10)) as char + } + }) + .collect() +} + +pub fn zip_code(rng: &mut R) -> String { + format!("{}11111", numeric_string(rng, 4, 4)) +} + +pub fn maybe_with_original(rng: &mut R, min_len: usize, max_len: usize) -> String { + let mut data = alpha_numeric_string(rng, min_len, max_len); + 
if rng.random_bool(0.10) && data.len() >= 8 { + let start = rng.random_range(0..=(data.len() - 8)); + data.replace_range(start..start + 8, "ORIGINAL"); + } + data +} + +pub fn pack_district_key(w_id: u16, d_id: u8) -> u32 { + (u32::from(w_id) * 100) + u32::from(d_id) +} + +pub fn pack_customer_key(w_id: u16, d_id: u8, c_id: u32) -> u64 { + ((u64::from(w_id) * 100) + u64::from(d_id)) * 10_000 + u64::from(c_id) +} + +pub fn pack_stock_key(w_id: u16, item_id: u32) -> u64 { + u64::from(w_id) * 1_000_000 + u64::from(item_id) +} + +pub fn pack_order_key(w_id: u16, d_id: u8, o_id: u32) -> u64 { + ((u64::from(w_id) * 100) + u64::from(d_id)) * 10_000_000 + u64::from(o_id) +} + +pub fn pack_order_line_key(w_id: u16, d_id: u8, o_id: u32, ol_number: u8) -> u64 { + pack_order_key(w_id, d_id, o_id) * 100 + u64::from(ol_number) +} + +pub fn keying_time(kind: TransactionKind, scale: f64) -> Duration { + scaled_duration( + match kind { + TransactionKind::NewOrder => 18.0, + TransactionKind::Payment => 3.0, + TransactionKind::OrderStatus => 2.0, + TransactionKind::Delivery => 2.0, + TransactionKind::StockLevel => 2.0, + }, + scale, + ) +} + +pub fn think_time(kind: TransactionKind, scale: f64, rng: &mut R) -> Duration { + let mean_secs = match kind { + TransactionKind::NewOrder => 12.0, + TransactionKind::Payment => 12.0, + TransactionKind::OrderStatus => 10.0, + TransactionKind::Delivery => 5.0, + TransactionKind::StockLevel => 5.0, + }; + if scale <= 0.0 { + return Duration::ZERO; + } + let mean_secs = mean_secs * scale; + let uniform = rng.random_range(f64::MIN_POSITIVE..1.0); + let sample = (-mean_secs * uniform.ln()).min(mean_secs * 10.0); + Duration::from_secs_f64(sample) +} + +fn scaled_duration(base_secs: f64, scale: f64) -> Duration { + if scale <= 0.0 { + Duration::ZERO + } else { + Duration::from_secs_f64(base_secs * scale) + } +} + +pub fn sleep_until_ms(target_ms: u64) { + loop { + let now = crate::summary::now_millis(); + if now >= target_ms { + return; + } + let 
remaining = target_ms - now; + thread::sleep(Duration::from_millis(remaining.min(100))); + } +} From 1a74fe9ea4120e1cbc7004d5cb42a443034e238f Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Thu, 26 Mar 2026 17:46:31 -0400 Subject: [PATCH 02/38] Start rewriting tpcc module to do IDC for cross-warehouse transactions Incomplete. I've mostly rewritten `new_order`, which I think is the hard one. I haven't yet started rewriting `payment`. `new_order` is still incomplete in that I haven't wired up reporting of `s_quantity`. This is an isolation hazard, as discussed out-of-band. --- Cargo.lock | 3 + modules/tpcc/Cargo.toml | 3 + modules/tpcc/src/lib.rs | 439 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 425 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0fa10cea95d..5f813791827 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9792,8 +9792,11 @@ name = "tpcc-module" version = "0.1.0" dependencies = [ "anyhow", + "http 1.3.1", "log", + "serde_json", "spacetimedb 2.1.0", + "spacetimedb-sats 2.1.0", ] [[package]] diff --git a/modules/tpcc/Cargo.toml b/modules/tpcc/Cargo.toml index e21da7f3a8a..a82e91cb971 100644 --- a/modules/tpcc/Cargo.toml +++ b/modules/tpcc/Cargo.toml @@ -10,6 +10,9 @@ crate-type = ["cdylib"] anyhow.workspace = true log.workspace = true spacetimedb = { workspace = true, features = ["unstable"] } +spacetimedb-sats = { workspace = true, features = ["serde"] } +http.workspace = true +serde_json.workspace = true [lints] workspace = true diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index ec3c7737663..d231ee0d6b1 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,7 +1,9 @@ +use http::Request; use spacetimedb::{ - procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, + http::Timeout, procedure, reducer, sats::serde::SerdeWrapper, table, Identity, ProcedureContext, ReducerContext, + ScheduleAt, SpacetimeType, Table, Timestamp, 
TxContext, }; -use std::collections::BTreeSet; +use std::{collections::BTreeSet, time::Duration}; const DISTRICTS_PER_WAREHOUSE: u8 = 10; const CUSTOMERS_PER_DISTRICT: u32 = 3_000; @@ -9,6 +11,15 @@ const ITEMS: u32 = 100_000; const MAX_C_DATA_LEN: usize = 500; const TAX_SCALE: i64 = 10_000; +#[spacetimedb::table(accessor = spacetimedb_uri)] +struct SpacetimeDbUri { + uri: String, +} + +fn get_spacetimedb_uri(tx: &TxContext) -> String { + tx.db.spacetimedb_uri().iter().next().unwrap().uri +} + macro_rules! ensure { ($cond:expr, $($arg:tt)+) => { if !($cond) { @@ -23,10 +34,12 @@ pub enum CustomerSelector { ByLastName(String), } +type WarehouseId = u16; + #[derive(Clone, Debug, SpacetimeType)] pub struct NewOrderLineInput { pub item_id: u32, - pub supply_w_id: u16, + pub supply_w_id: WarehouseId, pub quantity: u32, } @@ -34,7 +47,7 @@ pub struct NewOrderLineInput { pub struct NewOrderLineResult { pub item_id: u32, pub item_name: String, - pub supply_w_id: u16, + pub supply_w_id: WarehouseId, pub quantity: u32, pub stock_quantity: i32, pub item_price_cents: i64, @@ -74,7 +87,7 @@ pub struct PaymentResult { #[derive(Clone, Debug, SpacetimeType)] pub struct OrderStatusLineResult { pub item_id: u32, - pub supply_w_id: u16, + pub supply_w_id: WarehouseId, pub quantity: u32, pub amount_cents: i64, pub delivery_d: Option, @@ -95,7 +108,7 @@ pub struct OrderStatusResult { #[derive(Clone, Debug, SpacetimeType)] pub struct StockLevelResult { - pub warehouse_id: u16, + pub warehouse_id: WarehouseId, pub district_id: u8, pub threshold: i32, pub low_stock_count: u32, @@ -105,7 +118,7 @@ pub struct StockLevelResult { pub struct DeliveryQueueAck { pub scheduled_id: u64, pub queued_at: Timestamp, - pub warehouse_id: u16, + pub warehouse_id: WarehouseId, pub carrier_id: u8, } @@ -123,7 +136,7 @@ pub struct DeliveryCompletionView { pub driver_id: String, pub terminal_id: u32, pub request_id: u64, - pub warehouse_id: u16, + pub warehouse_id: WarehouseId, pub carrier_id: u8, pub 
queued_at: Timestamp, pub completed_at: Timestamp, @@ -135,7 +148,7 @@ pub struct DeliveryCompletionView { #[derive(Clone, Debug)] pub struct Warehouse { #[primary_key] - pub w_id: u16, + pub w_id: WarehouseId, pub w_name: String, pub w_street_1: String, pub w_street_2: String, @@ -144,6 +157,13 @@ pub struct Warehouse { pub w_zip: String, pub w_tax_bps: i32, pub w_ytd_cents: i64, + + /// Added by us: the [`Identity`] of the remote database where this warehouse is sharded, + /// or `None` if this warehouse is sharded in the local database. + /// + /// TPC-C 1.4.7: "Attributes may be added and/or duplicated from one table to another + /// as long as these changes do not improve performance." + pub remote_database_home: Option, } #[table( @@ -154,7 +174,7 @@ pub struct Warehouse { pub struct District { #[primary_key] pub district_key: u32, - pub d_w_id: u16, + pub d_w_id: WarehouseId, pub d_id: u8, pub d_name: String, pub d_street_1: String, @@ -176,7 +196,7 @@ pub struct District { pub struct Customer { #[primary_key] pub customer_key: u64, - pub c_w_id: u16, + pub c_w_id: WarehouseId, pub c_d_id: u8, pub c_id: u32, pub c_first: String, @@ -207,7 +227,7 @@ pub struct History { pub history_id: u64, pub h_c_id: u32, pub h_c_d_id: u8, - pub h_c_w_id: u16, + pub h_c_w_id: WarehouseId, pub h_d_id: u8, pub h_w_id: u16, pub h_date: Timestamp, @@ -234,7 +254,7 @@ pub struct Item { pub struct Stock { #[primary_key] pub stock_key: u64, - pub s_w_id: u16, + pub s_w_id: WarehouseId, pub s_i_id: u32, pub s_quantity: i32, pub s_dist_01: String, @@ -262,7 +282,7 @@ pub struct Stock { pub struct OOrder { #[primary_key] pub order_key: u64, - pub o_w_id: u16, + pub o_w_id: WarehouseId, pub o_d_id: u8, pub o_id: u32, pub o_c_id: u32, @@ -280,7 +300,7 @@ pub struct OOrder { pub struct NewOrder { #[primary_key] pub new_order_key: u64, - pub no_w_id: u16, + pub no_w_id: WarehouseId, pub no_d_id: u8, pub no_o_id: u32, } @@ -293,7 +313,7 @@ pub struct NewOrder { pub struct OrderLine { 
#[primary_key] pub order_line_key: u64, - pub ol_w_id: u16, + pub ol_w_id: WarehouseId, pub ol_d_id: u8, pub ol_o_id: u32, pub ol_number: u8, @@ -321,7 +341,7 @@ pub struct DeliveryJob { pub terminal_id: u32, pub request_id: u64, pub queued_at: Timestamp, - pub w_id: u16, + pub w_id: WarehouseId, pub carrier_id: u8, pub next_d_id: u8, pub skipped_districts: u8, @@ -341,7 +361,7 @@ pub struct DeliveryCompletion { pub driver_id: String, pub terminal_id: u32, pub request_id: u64, - pub warehouse_id: u16, + pub warehouse_id: WarehouseId, pub carrier_id: u8, pub queued_at: Timestamp, pub completed_at: Timestamp, @@ -483,7 +503,374 @@ pub fn new_order( c_id: u32, order_lines: Vec, ) -> Result { - ctx.try_with_tx(|tx| new_order_tx(tx, w_id, d_id, c_id, order_lines.clone())) + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), + "district id out of range" + ); + ensure!( + (5..=15).contains(&order_lines.len()), + "new-order requires between 5 and 15 order lines" + ); + + // Setup TX: validate warehouse, district, customer ID. + // These never change in TPC-C, so we don't need to include the checks in the same transaction as the rest of the work. + let (warehouse, district, customer, spacetimedb_uri) = ctx.try_with_tx(|tx| { + let warehouse = find_warehouse(tx, w_id)?; + let district = find_district(tx, w_id, d_id)?; + let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; + let spacetimedb_uri = get_spacetimedb_uri(tx); + Ok::<_, String>((warehouse, district, customer, spacetimedb_uri)) + })?; + + let (local_database_items, remote_database_items, all_local_warehouse) = ctx.try_with_tx(|tx| { + let mut local_database_items: Vec<(usize, NewOrderLineInput, Item, bool)> = + Vec::with_capacity(order_lines.len()); + let mut remote_database_items: Vec<(usize, NewOrderLineInput, Item, Identity)> = + Vec::with_capacity(order_lines.len()); + + // Whether this order applies only to a single warehouse. 
+ // This may be `false` even when `remote_database_items_to_get` is non-empty, + // as we may run multiple warehouses from the same database. + let mut all_local_warehouse = true; + + for (idx, line) in order_lines.iter().enumerate() { + ensure!(line.quantity > 0, "order line quantity must be positive"); + + let is_remote_warehouse = line.supply_w_id == w_id; + all_local_warehouse &= is_remote_warehouse; + + let warehouse = tx + .db + .warehouse() + .w_id() + .find(line.supply_w_id) + .ok_or_else(|| format!("No such warehouse: {}", line.supply_w_id))?; + + // TECHNICALLY NON-CONFORMANT: If we encounter a non-existent item in the order, + // we'll short-circuit and exit here. + // TPC-C technically requires, in 2.4.2.3, that we still retrieve and process all the valid item numbers. + // This would be a horrendous pain to implement, so we won't. + // We don't do the things the spec tells us it doesn't want us to do, namely: + // - changing the execution of other steps + // - using a different type of transaction + // But we do skip inspecting some number of valid items and stocks. + let item = find_item(tx, line.item_id)?; + match warehouse.remote_database_home { + None => { + // Warehouse is local to this database. + // We'll actually "process" the items, i.e. decrement the stock and sum the order price, + // after we look up and process all the remote items. + local_database_items.push((idx, NewOrderLineInput::clone(line), item, is_remote_warehouse)); + } + Some(remote_database_identity) => { + // Warehouse is on another database; we'll have to do a remote request. + // This is *really* non-conformant. + // TODO(docs): link to blog post justifying this. 
+ remote_database_items.push((idx, NewOrderLineInput::clone(line), item, remote_database_identity)); + } + } + } + + Ok::<_, String>((local_database_items, remote_database_items, all_local_warehouse)) + })?; + + let mut remote_item_reservations: Vec = Vec::with_capacity(remote_database_items.len()); + + for (_idx, line, item, remote_database_ident) in &remote_database_items { + match call_remote_function( + ctx, + &spacetimedb_uri, + *remote_database_ident, + "reserve_item_for_remote_order", + vec![serde_json::json!(spacetimedb_sats::serde::SerdeWrapper( + ReserveItemInput { + line: NewOrderLineInput::clone(line), + district: d_id, + } + ))], + ) { + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + remote_database_items, + remote_item_reservations, + ); + return Err(format!("Error reserving remote item: {e}")); + } + Ok(body) => { + let body = body.into_string().expect("Body should be valid UTF-8"); + let res: SerdeWrapper> = + serde_json::from_str(&body).expect("Response does not conform to expected schema"); + match res.0 { + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + remote_database_items, + remote_item_reservations, + ); + return Err(format!("Error reserving remote item from database: {e}")); + } + Ok(output) => remote_item_reservations.push(output), + } + } + }; + } + + match ctx.try_with_tx(|tx| { + let district = tx + .db + .district() + .district_key() + .find(district.district_key) + .expect("District should not have been removed since we retrieved it last"); + let order_id = district.d_next_o_id; + tx.db.district().district_key().update(District { + d_next_o_id: order_id + 1, + ..district + }); + + let mut subtotal_cents = 0; + + let line_results = local_database_items + .iter() + .map(|(idx, line, item, is_remote_warehouse)| { + let stock = find_stock(tx, line.supply_w_id, line.item_id).expect("Stock should exist for all items"); + tx.db.stock().stock_key().update(Stock { + 
s_quantity: adjust_stock_quantity(stock.s_quantity, line.quantity as i32), + s_ytd: stock.s_ytd + line.quantity as u64, + s_order_cnt: stock.s_order_cnt + 1, + s_remote_cnt: stock.s_remote_cnt + u32::from(*is_remote_warehouse), + ..stock.clone() + }); + + (idx, line, item, district_stock_info(&stock, d_id), stock.s_data) + }) + .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( + |((idx, line, item, _remote_db_ident), reservation)| { + (idx, line, item, reservation.s_dist, reservation.s_data) + }, + )) + .map(|(idx, line, item, s_dist, s_data)| { + let line_amount_cents = line.quantity as i64 * item.i_price_cents; + subtotal_cents += line_amount_cents; + let brand_generic = if contains_original(&item.i_data) && contains_original(&s_data) { + "B" + } else { + "G" + }; + tx.db.order_line().insert(OrderLine { + order_line_key: pack_order_line_key(w_id, d_id, order_id, (idx + 1) as u8), + ol_w_id: w_id, + ol_d_id: d_id, + ol_o_id: order_id, + ol_number: (idx + 1) as u8, + ol_i_id: line.item_id, + ol_supply_w_id: line.supply_w_id, + ol_delivery_d: None, + ol_quantity: line.quantity, + ol_amount_cents: line_amount_cents, + ol_dist_info: s_dist, + }); + + NewOrderLineResult { + item_id: item.i_id, + item_name: item.i_name, + supply_w_id: line.supply_w_id, + quantity: line.quantity, + stock_quantity: updated_stock_quantity, + item_price_cents: item.i_price_cents, + amount_cents: line_amount_cents, + brand_generic: brand_generic.to_string(), + } + }) + .collect::>(); + + let taxed = apply_tax( + subtotal_cents, + i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), + ); + let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); + + Ok(NewOrderResult { + warehouse_tax_bps: warehouse.w_tax_bps, + district_tax_bps: district.d_tax_bps, + customer_discount_bps: customer.c_discount_bps, + customer_last: customer.c_last, + customer_credit: customer.c_credit, + order_id, + entry_d: tx.timestamp, + total_amount_cents, 
+ all_local: all_local_warehouse, + lines: line_results, + }) + }) { + Ok(result) => { + confirm_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + remote_database_items, + remote_item_reservations, + ); + Ok(result) + } + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + remote_database_items, + remote_item_reservations, + ); + Err(e) + } + } +} + +fn call_remote_function( + ctx: &mut ProcedureContext, + spacetimedb_uri: &str, + database_ident: Identity, + function_name: &str, + arguments: Vec, +) -> Result { + let request = Request::builder() + .uri(format!( + "{spacetimedb_uri}/v1/database/{database_ident}/call/{function_name}" + )) + .method("POST") + // TODO(auth): include a token. + .body(serde_json::json!(arguments).to_string()) + .map_err(|e| format!("Error constructing `Request`: {e}"))?; + match ctx.http.send(request) { + Err(e) => Err(format!("Error sending request to remote database {database_ident} at URI {spacetimedb_uri} to call {function_name}: {e}")), + Ok(response) if response.status() != http::status::StatusCode::OK => Err(format!("Got non-200 response code {} from request to remote database {database_ident} at URI {spacetimedb_uri} when calling {function_name}: {}", response.status(), response.into_body().into_string_lossy())), + Ok(response) => Ok(response.into_body()), + } +} + +fn rollback_all_remote_item_reservations( + ctx: &mut ProcedureContext, + spacetimedb_uri: &str, + remote_items: Vec<(usize, NewOrderLineInput, Item, Identity)>, + reservations: Vec, +) { + for ((_idx, _line, _item, remote_database_ident), reservation) in + remote_items.into_iter().zip(reservations.into_iter()) + { + if let Err(e) = call_remote_function( + ctx, + spacetimedb_uri, + remote_database_ident, + "rollback_item_reservation", + vec![serde_json::json!(reservation.rollback_token)], + ) { + log::error!("Error rollinb back item reservation: {e}"); + } + } +} + +fn confirm_all_remote_item_reservations( + ctx: &mut 
ProcedureContext, + spacetimedb_uri: &str, + remote_items: Vec<(usize, NewOrderLineInput, Item, Identity)>, + reservations: Vec, +) { + for ((_idx, _line, _item, remote_database_ident), reservation) in + remote_items.into_iter().zip(reservations.into_iter()) + { + if let Err(e) = call_remote_function( + ctx, + spacetimedb_uri, + remote_database_ident, + "confirm_item_reservation", + vec![serde_json::json!(reservation.rollback_token)], + ) { + log::error!("Error confirming item reservation: {e}"); + } + } +} + +#[derive(SpacetimeType)] +pub struct ReserveItemOutput { + s_dist: String, + s_data: String, + rollback_token: u64, +} + +#[table(accessor = reserved_item_log)] +pub struct ReservedItemLog { + #[primary_key] + #[auto_inc] + rollback_token: u64, + line: NewOrderLineInput, +} + +#[derive(SpacetimeType)] +pub struct ReserveItemInput { + line: NewOrderLineInput, + district: u8, +} + +#[procedure] +pub fn reserve_item_for_remote_order( + ctx: &mut ProcedureContext, + input: ReserveItemInput, +) -> Result { + let ReserveItemInput { line, district } = input; + ctx.try_with_tx(|tx| { + let stock = find_stock(tx, line.supply_w_id, line.item_id)?; + + let quantity = line.quantity; + + let ReservedItemLog { rollback_token, .. 
} = tx.db.reserved_item_log().insert(ReservedItemLog { + rollback_token: 0, + line: line.clone(), + }); + + let reserved = ReserveItemOutput { + s_dist: district_stock_info(&stock, district), + s_data: stock.s_data.clone(), + rollback_token, + }; + + tx.db.stock().stock_key().update(Stock { + s_quantity: adjust_stock_quantity(stock.s_quantity, quantity as i32), + s_ytd: stock.s_ytd + u64::from(quantity), + s_order_cnt: stock.s_order_cnt + 1, + s_remote_cnt: stock.s_remote_cnt + 1, + ..stock + }); + + Ok(reserved) + }) +} + +#[reducer] +pub fn rollback_item_reservation(ctx: &ReducerContext, rollback_token: u64) -> Result<(), String> { + let line = ctx + .db + .reserved_item_log() + .rollback_token() + .find(rollback_token) + .ok_or_else(|| format!("No such rollback token: {rollback_token}"))? + .line; + let stock = find_stock(ctx, line.supply_w_id, line.item_id)?; + let quantity = line.quantity; + ctx.db.stock().stock_key().update(Stock { + s_quantity: reverse_stock_quantity(stock.s_quantity, quantity as i32), + s_ytd: stock.s_ytd - line.quantity as u64, + s_order_cnt: stock.s_order_cnt - 1, + s_remote_cnt: stock.s_remote_cnt - 1, + ..stock + }); + ctx.db.reserved_item_log().rollback_token().delete(rollback_token); + Ok(()) +} + +#[reducer] +pub fn confirm_item_reservation(ctx: &ReducerContext, rollback_token: u64) { + ctx.db.reserved_item_log().rollback_token().delete(rollback_token); } #[procedure] @@ -1097,7 +1484,7 @@ fn find_item(tx: &spacetimedb::TxContext, item_id: u32) -> Result .ok_or_else(|| format!("item {item_id} not found")) } -fn find_stock(tx: &spacetimedb::TxContext, w_id: u16, item_id: u32) -> Result { +fn find_stock(tx: &ReducerContext, w_id: u16, item_id: u32) -> Result { tx.db .stock() .by_w_i() @@ -1127,6 +1514,8 @@ fn contains_original(data: &str) -> bool { } fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { + assert!(ordered_quantity >= 1); + assert!(ordered_quantity <= 10); if current_quantity - ordered_quantity 
>= 10 { current_quantity - ordered_quantity } else { @@ -1134,6 +1523,16 @@ fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { } } +fn reverse_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { + assert!(ordered_quantity >= 1); + assert!(ordered_quantity <= 10); + if current_quantity + ordered_quantity >= 91 { + current_quantity + ordered_quantity - 91 + } else { + current_quantity + ordered_quantity + } +} + fn apply_tax(amount_cents: i64, total_tax_bps: i64) -> i64 { amount_cents * (TAX_SCALE + total_tax_bps) / TAX_SCALE } From d35b491d39359ce1642d9ec8d144edb83c7fd91f Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Fri, 27 Mar 2026 11:30:01 -0400 Subject: [PATCH 03/38] Finish rewriting `new_order`, and commentary on non-conformance --- modules/tpcc/src/lib.rs | 549 +++++++++--------- tools/tpcc-runner/src/client.rs | 15 + tools/tpcc-runner/src/loader.rs | 4 + .../confirm_item_reservation_reducer.rs | 68 +++ tools/tpcc-runner/src/module_bindings/mod.rs | 37 +- ...reserve_item_for_remote_order_procedure.rs | 55 ++ .../reserve_item_input_type.rs | 18 + .../reserve_item_output_type.rs | 18 + .../module_bindings/reserved_item_log_type.rs | 54 ++ .../rollback_item_reservation_reducer.rs | 68 +++ .../set_spacetimedb_uri_reducer.rs | 66 +++ .../module_bindings/spacetime_db_uri_type.rs | 45 ++ .../src/module_bindings/warehouse_type.rs | 3 + 13 files changed, 735 insertions(+), 265 deletions(-) create mode 100644 tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/reserve_item_input_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/reserve_item_output_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs create mode 100644 
tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/set_spacetimedb_uri_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index d231ee0d6b1..75d9d4895eb 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,9 +1,9 @@ use http::Request; use spacetimedb::{ - http::Timeout, procedure, reducer, sats::serde::SerdeWrapper, table, Identity, ProcedureContext, ReducerContext, - ScheduleAt, SpacetimeType, Table, Timestamp, TxContext, + procedure, reducer, sats::serde::SerdeWrapper, table, Identity, ProcedureContext, ReducerContext, ScheduleAt, + SpacetimeType, Table, Timestamp, TxContext, }; -use std::{collections::BTreeSet, time::Duration}; +use std::collections::BTreeSet; const DISTRICTS_PER_WAREHOUSE: u8 = 10; const CUSTOMERS_PER_DISTRICT: u32 = 3_000; @@ -16,6 +16,14 @@ struct SpacetimeDbUri { uri: String, } +#[reducer] +fn set_spacetimedb_uri(ctx: &ReducerContext, uri: String) { + for row in ctx.db.spacetimedb_uri().iter() { + ctx.db.spacetimedb_uri().delete(row); + } + ctx.db.spacetimedb_uri().insert(SpacetimeDbUri { uri }); +} + fn get_spacetimedb_uri(tx: &TxContext) -> String { tx.db.spacetimedb_uri().iter().next().unwrap().uri } @@ -498,7 +506,7 @@ pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<() #[procedure] pub fn new_order( ctx: &mut ProcedureContext, - w_id: u16, + w_id: WarehouseId, d_id: u8, c_id: u32, order_lines: Vec, @@ -513,7 +521,8 @@ pub fn new_order( ); // Setup TX: validate warehouse, district, customer ID. - // These never change in TPC-C, so we don't need to include the checks in the same transaction as the rest of the work. + // NON-CONFORMANT: These never change in TPC-C, + // so we don't need to include the checks in the same transaction as the rest of the work. 
let (warehouse, district, customer, spacetimedb_uri) = ctx.try_with_tx(|tx| { let warehouse = find_warehouse(tx, w_id)?; let district = find_district(tx, w_id, d_id)?; @@ -522,11 +531,129 @@ pub fn new_order( Ok::<_, String>((warehouse, district, customer, spacetimedb_uri)) })?; - let (local_database_items, remote_database_items, all_local_warehouse) = ctx.try_with_tx(|tx| { - let mut local_database_items: Vec<(usize, NewOrderLineInput, Item, bool)> = - Vec::with_capacity(order_lines.len()); - let mut remote_database_items: Vec<(usize, NewOrderLineInput, Item, Identity)> = - Vec::with_capacity(order_lines.len()); + let PartitionedItems { + local_database_items, + remote_database_items, + all_local_warehouse, + } = + // Look up all of the items in the order, and fail if any of them doesn't exist. + // If they all exist, sort them into two groups: + // - `local_database_items`, items in warehouses managed by this database. + // - `remote_database_items`, items in warehouses managed by remote databases. + // Also compute `all_local_warehouse`, which says if all of the items are in the warehouse `w_id`. + // NON-CONFORMANT: This is a separate transaction from the later one, + // which updates stock quantities for the local items and records the new order. + // In a real system, an item might change between the two, but none of the TPC-C transactions writes to items. + // We (ab)use this knowledge to skip compensating for writes to items. + partition_local_from_remote_database_items(ctx, w_id, &order_lines)?; + + // NON-CONFORMANT: We reserve items from the remote database extra-transactionally. + // If our TPC-C transaction fails, we'll roll back those reservations. + // This opens us up to dirty read isolation hazards, + // where a concurrent transaction may observe a change in stock quantity that later rolls back. 
+ // This will never happen with only the TPC-C transactions, + // as stock quantity is only written by the `new_order` transaction, + // and `new_order` can only fail prior to updating the stock quantity, due to non-existent items. + // We (ab)use this knowledge to skip compensating for rollbacks to prevent dirty reads. + let remote_item_reservations = reserve_remote_items(ctx, &spacetimedb_uri, d_id, &remote_database_items)?; + + match ctx.try_with_tx(|tx| { + let district = tx + .db + .district() + .district_key() + .find(district.district_key) + .expect("District should not have been removed since we retrieved it last"); + let order_id = district.d_next_o_id; + tx.db.district().district_key().update(District { + d_next_o_id: order_id + 1, + ..district + }); + + let line_results = local_database_items + .iter() + .map(|local_item| claim_stock_for_local_database_item(tx, local_item, d_id)) + .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( + |(remote_item, reserved_item)| remote_item_to_processed_new_order_item(remote_item, reserved_item), + )) + .map(|processed_item| insert_order_line(tx, w_id, d_id, order_id, processed_item)) + .collect::>(); + + let subtotal_cents = line_results.iter().map(|line_result| line_result.amount_cents).sum(); + + let taxed = apply_tax( + subtotal_cents, + i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), + ); + let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); + + Ok(NewOrderResult { + warehouse_tax_bps: warehouse.w_tax_bps, + district_tax_bps: district.d_tax_bps, + customer_discount_bps: customer.c_discount_bps, + customer_last: customer.c_last.clone(), + customer_credit: customer.c_credit.clone(), + order_id, + entry_d: tx.timestamp, + total_amount_cents, + all_local: all_local_warehouse, + lines: line_results, + }) + }) { + Ok(result) => { + confirm_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + &remote_database_items, + 
remote_item_reservations, + ); + Ok(result) + } + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + &remote_database_items, + remote_item_reservations, + ); + Err(e) + } + } +} + +struct LocalDatabaseItem { + idx: usize, + line: NewOrderLineInput, + item: Item, + is_remote_warehouse: bool, +} + +struct RemoteDatabaseItem { + idx: usize, + line: NewOrderLineInput, + item: Item, + remote_database_identity: Identity, +} + +struct PartitionedItems { + local_database_items: Vec, + remote_database_items: Vec, + + /// Are all items from the same warehouse as the requesting terminal? + /// + /// Note that this may be false even if all items are partitioned into [`Self::local_database_items`], + /// as we may manage multiple warehouses with a single database. + all_local_warehouse: bool, +} + +fn partition_local_from_remote_database_items( + ctx: &mut ProcedureContext, + local_warehouse_id: WarehouseId, + order_lines: &[NewOrderLineInput], +) -> Result { + ctx.try_with_tx(|tx| { + let mut local_database_items: Vec = Vec::with_capacity(order_lines.len()); + let mut remote_database_items: Vec = Vec::with_capacity(order_lines.len()); // Whether this order applies only to a single warehouse. // This may be `false` even when `remote_database_items_to_get` is non-empty, @@ -536,8 +663,8 @@ pub fn new_order( for (idx, line) in order_lines.iter().enumerate() { ensure!(line.quantity > 0, "order line quantity must be positive"); - let is_remote_warehouse = line.supply_w_id == w_id; - all_local_warehouse &= is_remote_warehouse; + let is_remote_warehouse = line.supply_w_id == local_warehouse_id; + all_local_warehouse &= !is_remote_warehouse; let warehouse = tx .db @@ -560,32 +687,58 @@ pub fn new_order( // Warehouse is local to this database. // We'll actually "process" the items, i.e. decrement the stock and sum the order price, // after we look up and process all the remote items. 
- local_database_items.push((idx, NewOrderLineInput::clone(line), item, is_remote_warehouse)); + local_database_items.push(LocalDatabaseItem { + idx, + line: line.clone(), + item, + is_remote_warehouse, + }); } Some(remote_database_identity) => { // Warehouse is on another database; we'll have to do a remote request. // This is *really* non-conformant. // TODO(docs): link to blog post justifying this. - remote_database_items.push((idx, NewOrderLineInput::clone(line), item, remote_database_identity)); + remote_database_items.push(RemoteDatabaseItem { + idx, + line: line.clone(), + item, + remote_database_identity, + }); } } } - Ok::<_, String>((local_database_items, remote_database_items, all_local_warehouse)) - })?; + Ok(PartitionedItems { + local_database_items, + remote_database_items, + all_local_warehouse, + }) + }) +} +fn reserve_remote_items( + ctx: &mut ProcedureContext, + spacetimedb_uri: &str, + district_id: u8, + remote_database_items: &[RemoteDatabaseItem], +) -> Result, String> { let mut remote_item_reservations: Vec = Vec::with_capacity(remote_database_items.len()); - for (_idx, line, item, remote_database_ident) in &remote_database_items { + for RemoteDatabaseItem { + line, + remote_database_identity, + .. 
+ } in remote_database_items + { match call_remote_function( ctx, &spacetimedb_uri, - *remote_database_ident, + *remote_database_identity, "reserve_item_for_remote_order", vec![serde_json::json!(spacetimedb_sats::serde::SerdeWrapper( ReserveItemInput { line: NewOrderLineInput::clone(line), - district: d_id, + district: district_id, } ))], ) { @@ -618,113 +771,7 @@ pub fn new_order( }; } - match ctx.try_with_tx(|tx| { - let district = tx - .db - .district() - .district_key() - .find(district.district_key) - .expect("District should not have been removed since we retrieved it last"); - let order_id = district.d_next_o_id; - tx.db.district().district_key().update(District { - d_next_o_id: order_id + 1, - ..district - }); - - let mut subtotal_cents = 0; - - let line_results = local_database_items - .iter() - .map(|(idx, line, item, is_remote_warehouse)| { - let stock = find_stock(tx, line.supply_w_id, line.item_id).expect("Stock should exist for all items"); - tx.db.stock().stock_key().update(Stock { - s_quantity: adjust_stock_quantity(stock.s_quantity, line.quantity as i32), - s_ytd: stock.s_ytd + line.quantity as u64, - s_order_cnt: stock.s_order_cnt + 1, - s_remote_cnt: stock.s_remote_cnt + u32::from(*is_remote_warehouse), - ..stock.clone() - }); - - (idx, line, item, district_stock_info(&stock, d_id), stock.s_data) - }) - .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( - |((idx, line, item, _remote_db_ident), reservation)| { - (idx, line, item, reservation.s_dist, reservation.s_data) - }, - )) - .map(|(idx, line, item, s_dist, s_data)| { - let line_amount_cents = line.quantity as i64 * item.i_price_cents; - subtotal_cents += line_amount_cents; - let brand_generic = if contains_original(&item.i_data) && contains_original(&s_data) { - "B" - } else { - "G" - }; - tx.db.order_line().insert(OrderLine { - order_line_key: pack_order_line_key(w_id, d_id, order_id, (idx + 1) as u8), - ol_w_id: w_id, - ol_d_id: d_id, - ol_o_id: order_id, - 
ol_number: (idx + 1) as u8, - ol_i_id: line.item_id, - ol_supply_w_id: line.supply_w_id, - ol_delivery_d: None, - ol_quantity: line.quantity, - ol_amount_cents: line_amount_cents, - ol_dist_info: s_dist, - }); - - NewOrderLineResult { - item_id: item.i_id, - item_name: item.i_name, - supply_w_id: line.supply_w_id, - quantity: line.quantity, - stock_quantity: updated_stock_quantity, - item_price_cents: item.i_price_cents, - amount_cents: line_amount_cents, - brand_generic: brand_generic.to_string(), - } - }) - .collect::>(); - - let taxed = apply_tax( - subtotal_cents, - i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), - ); - let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); - - Ok(NewOrderResult { - warehouse_tax_bps: warehouse.w_tax_bps, - district_tax_bps: district.d_tax_bps, - customer_discount_bps: customer.c_discount_bps, - customer_last: customer.c_last, - customer_credit: customer.c_credit, - order_id, - entry_d: tx.timestamp, - total_amount_cents, - all_local: all_local_warehouse, - lines: line_results, - }) - }) { - Ok(result) => { - confirm_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - remote_database_items, - remote_item_reservations, - ); - Ok(result) - } - Err(e) => { - rollback_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - remote_database_items, - remote_item_reservations, - ); - Err(e) - } - } + Ok(remote_item_reservations) } fn call_remote_function( @@ -752,16 +799,14 @@ fn call_remote_function( fn rollback_all_remote_item_reservations( ctx: &mut ProcedureContext, spacetimedb_uri: &str, - remote_items: Vec<(usize, NewOrderLineInput, Item, Identity)>, + remote_items: &[RemoteDatabaseItem], reservations: Vec, ) { - for ((_idx, _line, _item, remote_database_ident), reservation) in - remote_items.into_iter().zip(reservations.into_iter()) - { + for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { if let Err(e) = call_remote_function( ctx, 
spacetimedb_uri, - remote_database_ident, + remote_item.remote_database_identity, "rollback_item_reservation", vec![serde_json::json!(reservation.rollback_token)], ) { @@ -773,16 +818,14 @@ fn rollback_all_remote_item_reservations( fn confirm_all_remote_item_reservations( ctx: &mut ProcedureContext, spacetimedb_uri: &str, - remote_items: Vec<(usize, NewOrderLineInput, Item, Identity)>, + remote_items: &[RemoteDatabaseItem], reservations: Vec, ) { - for ((_idx, _line, _item, remote_database_ident), reservation) in - remote_items.into_iter().zip(reservations.into_iter()) - { + for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { if let Err(e) = call_remote_function( ctx, spacetimedb_uri, - remote_database_ident, + remote_item.remote_database_identity, "confirm_item_reservation", vec![serde_json::json!(reservation.rollback_token)], ) { @@ -791,10 +834,107 @@ fn confirm_all_remote_item_reservations( } } +struct ProcessedNewOrderItem { + idx: usize, + line: NewOrderLineInput, + item: Item, + district_stock_info: String, + stock_data: String, + updated_quantity: i32, +} + +fn claim_stock_for_local_database_item( + tx: &TxContext, + local_item: &LocalDatabaseItem, + district_id: u8, +) -> ProcessedNewOrderItem { + let stock = + find_stock(tx, local_item.line.supply_w_id, local_item.line.item_id).expect("Stock should exist for all items"); + let updated_quantity = adjust_stock_quantity(stock.s_quantity, local_item.line.quantity as i32); + tx.db.stock().stock_key().update(Stock { + s_quantity: updated_quantity, + s_ytd: stock.s_ytd + local_item.line.quantity as u64, + s_order_cnt: stock.s_order_cnt + 1, + s_remote_cnt: stock.s_remote_cnt + u32::from(local_item.is_remote_warehouse), + ..stock.clone() + }); + + ProcessedNewOrderItem { + idx: local_item.idx, + line: local_item.line.clone(), + item: local_item.item.clone(), + district_stock_info: district_stock_info(&stock, district_id), + stock_data: stock.s_data.clone(), + 
updated_quantity, + } +} + +fn remote_item_to_processed_new_order_item( + remote_item: &RemoteDatabaseItem, + reserved_item: &ReserveItemOutput, +) -> ProcessedNewOrderItem { + ProcessedNewOrderItem { + idx: remote_item.idx, + line: remote_item.line.clone(), + item: remote_item.item.clone(), + district_stock_info: reserved_item.s_dist.clone(), + stock_data: reserved_item.s_data.clone(), + updated_quantity: reserved_item.updated_quantity, + } +} + +fn insert_order_line( + tx: &TxContext, + warehouse_id: WarehouseId, + district_id: u8, + order_id: u32, + processed_item: ProcessedNewOrderItem, +) -> NewOrderLineResult { + let ProcessedNewOrderItem { + idx, + line, + item, + district_stock_info, + stock_data, + updated_quantity, + } = processed_item; + let line_amount_cents = line.quantity as i64 * item.i_price_cents; + let brand_generic = if contains_original(&item.i_data) && contains_original(&stock_data) { + "B" + } else { + "G" + }; + tx.db.order_line().insert(OrderLine { + order_line_key: pack_order_line_key(warehouse_id, district_id, order_id, (idx + 1) as u8), + ol_w_id: warehouse_id, + ol_d_id: district_id, + ol_o_id: order_id, + ol_number: (idx + 1) as u8, + ol_i_id: line.item_id, + ol_supply_w_id: line.supply_w_id, + ol_delivery_d: None, + ol_quantity: line.quantity, + ol_amount_cents: line_amount_cents, + ol_dist_info: district_stock_info, + }); + + NewOrderLineResult { + item_id: item.i_id, + item_name: item.i_name, + supply_w_id: line.supply_w_id, + quantity: line.quantity, + stock_quantity: updated_quantity, + item_price_cents: item.i_price_cents, + amount_cents: line_amount_cents, + brand_generic: brand_generic.to_string(), + } +} + #[derive(SpacetimeType)] pub struct ReserveItemOutput { s_dist: String, s_data: String, + updated_quantity: i32, rollback_token: u64, } @@ -821,23 +961,26 @@ pub fn reserve_item_for_remote_order( ctx.try_with_tx(|tx| { let stock = find_stock(tx, line.supply_w_id, line.item_id)?; - let quantity = line.quantity; - let 
ReservedItemLog { rollback_token, .. } = tx.db.reserved_item_log().insert(ReservedItemLog { rollback_token: 0, line: line.clone(), }); + let reserved_quantity = line.quantity; + let updated_quantity = adjust_stock_quantity(stock.s_quantity, reserved_quantity as i32); + let reserved = ReserveItemOutput { s_dist: district_stock_info(&stock, district), s_data: stock.s_data.clone(), + updated_quantity, rollback_token, }; tx.db.stock().stock_key().update(Stock { - s_quantity: adjust_stock_quantity(stock.s_quantity, quantity as i32), - s_ytd: stock.s_ytd + u64::from(quantity), + s_quantity: updated_quantity, + s_ytd: stock.s_ytd + u64::from(reserved_quantity), s_order_cnt: stock.s_order_cnt + 1, + // This must be an order from a remote warehouse, it's coming from a whole different database. s_remote_cnt: stock.s_remote_cnt + 1, ..stock }); @@ -1084,130 +1227,6 @@ fn validate_stock_row(row: &Stock) -> Result<(), String> { Ok(()) } -fn new_order_tx( - tx: &spacetimedb::TxContext, - w_id: u16, - d_id: u8, - c_id: u32, - order_lines: Vec, -) -> Result { - ensure!( - (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), - "district id out of range" - ); - ensure!( - (5..=15).contains(&order_lines.len()), - "new-order requires between 5 and 15 order lines" - ); - - let warehouse = find_warehouse(tx, w_id)?; - let district = find_district(tx, w_id, d_id)?; - let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; - - let mut touched_items = Vec::with_capacity(order_lines.len()); - let mut all_local = true; - for line in &order_lines { - ensure!(line.quantity > 0, "order line quantity must be positive"); - let item = find_item(tx, line.item_id)?; - let stock = find_stock(tx, line.supply_w_id, line.item_id)?; - if line.supply_w_id != w_id { - all_local = false; - } - touched_items.push((line.clone(), item, stock)); - } - - let order_id = district.d_next_o_id; - - tx.db.district().district_key().update(District { - d_next_o_id: district.d_next_o_id + 1, - ..district.clone() - }); - - 
tx.db.oorder().insert(OOrder { - order_key: pack_order_key(w_id, d_id, order_id), - o_w_id: w_id, - o_d_id: d_id, - o_id: order_id, - o_c_id: c_id, - o_entry_d: tx.timestamp, - o_carrier_id: None, - o_ol_cnt: order_lines.len() as u8, - o_all_local: all_local, - }); - - tx.db.new_order_row().insert(NewOrder { - new_order_key: pack_order_key(w_id, d_id, order_id), - no_w_id: w_id, - no_d_id: d_id, - no_o_id: order_id, - }); - - let mut line_results = Vec::with_capacity(touched_items.len()); - let mut subtotal_cents = 0i64; - for (idx, (line, item, stock)) in touched_items.into_iter().enumerate() { - let updated_stock_quantity = adjust_stock_quantity(stock.s_quantity, line.quantity as i32); - tx.db.stock().stock_key().update(Stock { - s_quantity: updated_stock_quantity, - s_ytd: stock.s_ytd + u64::from(line.quantity), - s_order_cnt: stock.s_order_cnt + 1, - s_remote_cnt: stock.s_remote_cnt + u32::from(line.supply_w_id != w_id), - ..stock.clone() - }); - - let line_amount_cents = item.i_price_cents * i64::from(line.quantity); - subtotal_cents += line_amount_cents; - let dist_info = district_stock_info(&stock, d_id); - tx.db.order_line().insert(OrderLine { - order_line_key: pack_order_line_key(w_id, d_id, order_id, (idx + 1) as u8), - ol_w_id: w_id, - ol_d_id: d_id, - ol_o_id: order_id, - ol_number: (idx + 1) as u8, - ol_i_id: line.item_id, - ol_supply_w_id: line.supply_w_id, - ol_delivery_d: None, - ol_quantity: line.quantity, - ol_amount_cents: line_amount_cents, - ol_dist_info: dist_info, - }); - - let brand_generic = if contains_original(&item.i_data) && contains_original(&stock.s_data) { - "B" - } else { - "G" - }; - line_results.push(NewOrderLineResult { - item_id: item.i_id, - item_name: item.i_name, - supply_w_id: line.supply_w_id, - quantity: line.quantity, - stock_quantity: updated_stock_quantity, - item_price_cents: item.i_price_cents, - amount_cents: line_amount_cents, - brand_generic: brand_generic.to_string(), - }); - } - - let taxed = apply_tax( - 
subtotal_cents, - i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), - ); - let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); - - Ok(NewOrderResult { - warehouse_tax_bps: warehouse.w_tax_bps, - district_tax_bps: district.d_tax_bps, - customer_discount_bps: customer.c_discount_bps, - customer_last: customer.c_last, - customer_credit: customer.c_credit, - order_id, - entry_d: tx.timestamp, - total_amount_cents, - all_local, - lines: line_results, - }) -} - fn payment_tx(tx: &spacetimedb::TxContext, req: PaymentRequest<'_>) -> Result { ensure!(req.payment_amount_cents > 0, "payment amount must be positive"); @@ -1523,6 +1542,8 @@ fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { } } +/// NON-CONFORMANT: we're abusing the fact that TPC-C updates stock quantities in a predictable way +/// which is both commutative and associative to be able to roll back stock reservations. fn reverse_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { assert!(ordered_quantity >= 1); assert!(ordered_quantity <= 10); diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs index 01639c70eed..d685f7ee2a7 100644 --- a/tools/tpcc-runner/src/client.rs +++ b/tools/tpcc-runner/src/client.rs @@ -46,6 +46,21 @@ impl ModuleClient { }) } + pub fn set_spacetimedb_uri(&self, uri: &str) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn + .reducers + .set_spacetimedb_uri_then(uri.to_string(), move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("set_spacetimedb_uri failed: {}", message), + Ok(Err(err)) => Err(anyhow!("set_spacetimedb_uri internal error: {}", err)), + Err(_) => bail!("timed out waiting for set_spacetimedb_uri"), + } + } + pub fn reset_tpcc(&self) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.reset_tpcc_then(move |_, res| { diff --git 
a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index 66d3304a7a6..f181d51cb65 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -27,6 +27,8 @@ pub fn run(config: LoadConfig) -> Result<()> { client.reset_tpcc().context("failed to reset tpcc data")?; } + client.set_spacetimedb_uri(&config.connection.uri)?; + let mut rng = StdRng::seed_from_u64(0x5eed_5eed); let load_c_last = rng.random_range(0..=255); let base_ts = Timestamp::from(SystemTime::now()); @@ -89,6 +91,8 @@ fn load_warehouses_and_districts( w_zip: zip_code(rng), w_tax_bps: rng.random_range(0..=2_000), w_ytd_cents: WAREHOUSE_YTD_CENTS, + + remote_database_home: None, }); for d_id in 1..=DISTRICTS_PER_WAREHOUSE { diff --git a/tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs b/tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs new file mode 100644 index 00000000000..1f449f2ff45 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct ConfirmItemReservationArgs { + pub rollback_token: u64, +} + +impl From for super::Reducer { + fn from(args: ConfirmItemReservationArgs) -> Self { + Self::ConfirmItemReservation { + rollback_token: args.rollback_token, + } + } +} + +impl __sdk::InModule for ConfirmItemReservationArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `confirm_item_reservation`. +/// +/// Implemented for [`super::RemoteReducers`]. 
+pub trait confirm_item_reservation { + /// Request that the remote module invoke the reducer `confirm_item_reservation` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`confirm_item_reservation:confirm_item_reservation_then`] to run a callback after the reducer completes. + fn confirm_item_reservation(&self, rollback_token: u64) -> __sdk::Result<()> { + self.confirm_item_reservation_then(rollback_token, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `confirm_item_reservation` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn confirm_item_reservation_then( + &self, + rollback_token: u64, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl confirm_item_reservation for super::RemoteReducers { + fn confirm_item_reservation_then( + &self, + rollback_token: u64, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(ConfirmItemReservationArgs { rollback_token }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/mod.rs b/tools/tpcc-runner/src/module_bindings/mod.rs index 4bca7a5d7da..85aee5e3f02 100644 --- a/tools/tpcc-runner/src/module_bindings/mod.rs +++ b/tools/tpcc-runner/src/module_bindings/mod.rs @@ -1,11 +1,12 @@ // THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE // WILL NOT BE SAVED. 
MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. -// This was generated using spacetimedb cli version 2.1.0 (commit 36c416ff4e2b1546db51145c2bcd65070e36b416). +// This was generated using spacetimedb cli version 2.1.0 (commit d84b7a7f9ee56d9bd5674a2c4583f20873f0c695). #![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; +pub mod confirm_item_reservation_reducer; pub mod customer_selector_type; pub mod customer_type; pub mod delivery_completion_type; @@ -40,12 +41,20 @@ pub mod order_status_result_type; pub mod payment_procedure; pub mod payment_result_type; pub mod queue_delivery_procedure; +pub mod reserve_item_for_remote_order_procedure; +pub mod reserve_item_input_type; +pub mod reserve_item_output_type; +pub mod reserved_item_log_type; pub mod reset_tpcc_reducer; +pub mod rollback_item_reservation_reducer; +pub mod set_spacetimedb_uri_reducer; +pub mod spacetime_db_uri_type; pub mod stock_level_procedure; pub mod stock_level_result_type; pub mod stock_type; pub mod warehouse_type; +pub use confirm_item_reservation_reducer::confirm_item_reservation; pub use customer_selector_type::CustomerSelector; pub use customer_type::Customer; pub use delivery_completion_type::DeliveryCompletion; @@ -80,7 +89,14 @@ pub use order_status_result_type::OrderStatusResult; pub use payment_procedure::payment; pub use payment_result_type::PaymentResult; pub use queue_delivery_procedure::queue_delivery; +pub use reserve_item_for_remote_order_procedure::reserve_item_for_remote_order; +pub use reserve_item_input_type::ReserveItemInput; +pub use reserve_item_output_type::ReserveItemOutput; +pub use reserved_item_log_type::ReservedItemLog; pub use reset_tpcc_reducer::reset_tpcc; +pub use rollback_item_reservation_reducer::rollback_item_reservation; +pub use set_spacetimedb_uri_reducer::set_spacetimedb_uri; +pub use spacetime_db_uri_type::SpacetimeDbUri; pub use stock_level_procedure::stock_level; pub use 
stock_level_result_type::StockLevelResult; pub use stock_type::Stock; @@ -94,6 +110,7 @@ pub use warehouse_type::Warehouse; /// to indicate which reducer caused the event. pub enum Reducer { + ConfirmItemReservation { rollback_token: u64 }, LoadCustomers { rows: Vec }, LoadDistricts { rows: Vec }, LoadHistory { rows: Vec }, @@ -104,6 +121,8 @@ pub enum Reducer { LoadStocks { rows: Vec }, LoadWarehouses { rows: Vec }, ResetTpcc, + RollbackItemReservation { rollback_token: u64 }, + SetSpacetimedbUri { uri: String }, } impl __sdk::InModule for Reducer { @@ -113,6 +132,7 @@ impl __sdk::InModule for Reducer { impl __sdk::Reducer for Reducer { fn reducer_name(&self) -> &'static str { match self { + Reducer::ConfirmItemReservation { .. } => "confirm_item_reservation", Reducer::LoadCustomers { .. } => "load_customers", Reducer::LoadDistricts { .. } => "load_districts", Reducer::LoadHistory { .. } => "load_history", @@ -123,12 +143,19 @@ impl __sdk::Reducer for Reducer { Reducer::LoadStocks { .. } => "load_stocks", Reducer::LoadWarehouses { .. } => "load_warehouses", Reducer::ResetTpcc => "reset_tpcc", + Reducer::RollbackItemReservation { .. } => "rollback_item_reservation", + Reducer::SetSpacetimedbUri { .. 
} => "set_spacetimedb_uri", _ => unreachable!(), } } #[allow(clippy::clone_on_copy)] fn args_bsatn(&self) -> Result, __sats::bsatn::EncodeError> { match self { + Reducer::ConfirmItemReservation { rollback_token } => { + __sats::bsatn::to_vec(&confirm_item_reservation_reducer::ConfirmItemReservationArgs { + rollback_token: rollback_token.clone(), + }) + } Reducer::LoadCustomers { rows } => { __sats::bsatn::to_vec(&load_customers_reducer::LoadCustomersArgs { rows: rows.clone() }) } @@ -157,6 +184,14 @@ impl __sdk::Reducer for Reducer { __sats::bsatn::to_vec(&load_warehouses_reducer::LoadWarehousesArgs { rows: rows.clone() }) } Reducer::ResetTpcc => __sats::bsatn::to_vec(&reset_tpcc_reducer::ResetTpccArgs {}), + Reducer::RollbackItemReservation { rollback_token } => { + __sats::bsatn::to_vec(&rollback_item_reservation_reducer::RollbackItemReservationArgs { + rollback_token: rollback_token.clone(), + }) + } + Reducer::SetSpacetimedbUri { uri } => { + __sats::bsatn::to_vec(&set_spacetimedb_uri_reducer::SetSpacetimedbUriArgs { uri: uri.clone() }) + } _ => unreachable!(), } } diff --git a/tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs b/tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs new file mode 100644 index 00000000000..17fd97889c0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs @@ -0,0 +1,55 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::reserve_item_input_type::ReserveItemInput; +use super::reserve_item_output_type::ReserveItemOutput; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct ReserveItemForRemoteOrderArgs { + pub input: ReserveItemInput, +} + +impl __sdk::InModule for ReserveItemForRemoteOrderArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `reserve_item_for_remote_order`. +/// +/// Implemented for [`super::RemoteProcedures`]. +pub trait reserve_item_for_remote_order { + fn reserve_item_for_remote_order(&self, input: ReserveItemInput) { + self.reserve_item_for_remote_order_then(input, |_, _| {}); + } + + fn reserve_item_for_remote_order_then( + &self, + input: ReserveItemInput, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl reserve_item_for_remote_order for super::RemoteProcedures { + fn reserve_item_for_remote_order_then( + &self, + input: ReserveItemInput, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "reserve_item_for_remote_order", + ReserveItemForRemoteOrderArgs { input }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/reserve_item_input_type.rs b/tools/tpcc-runner/src/module_bindings/reserve_item_input_type.rs new file mode 100644 index 00000000000..cbc3507fb38 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/reserve_item_input_type.rs @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_line_input_type::NewOrderLineInput; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct ReserveItemInput { + pub line: NewOrderLineInput, + pub district: u8, +} + +impl __sdk::InModule for ReserveItemInput { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/reserve_item_output_type.rs b/tools/tpcc-runner/src/module_bindings/reserve_item_output_type.rs new file mode 100644 index 00000000000..64822795fec --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/reserve_item_output_type.rs @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct ReserveItemOutput { + pub s_dist: String, + pub s_data: String, + pub updated_quantity: i32, + pub rollback_token: u64, +} + +impl __sdk::InModule for ReserveItemOutput { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs b/tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs new file mode 100644 index 00000000000..ccca101e097 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs @@ -0,0 +1,54 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_line_input_type::NewOrderLineInput; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct ReservedItemLog { + pub rollback_token: u64, + pub line: NewOrderLineInput, +} + +impl __sdk::InModule for ReservedItemLog { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `ReservedItemLog`. +/// +/// Provides typed access to columns for query building. +pub struct ReservedItemLogCols { + pub rollback_token: __sdk::__query_builder::Col, + pub line: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for ReservedItemLog { + type Cols = ReservedItemLogCols; + fn cols(table_name: &'static str) -> Self::Cols { + ReservedItemLogCols { + rollback_token: __sdk::__query_builder::Col::new(table_name, "rollback_token"), + line: __sdk::__query_builder::Col::new(table_name, "line"), + } + } +} + +/// Indexed column accessor struct for the table `ReservedItemLog`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct ReservedItemLogIxCols { + pub rollback_token: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for ReservedItemLog { + type IxCols = ReservedItemLogIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + ReservedItemLogIxCols { + rollback_token: __sdk::__query_builder::IxCol::new(table_name, "rollback_token"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for ReservedItemLog {} diff --git a/tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs b/tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs new file mode 100644 index 00000000000..83166d957fd --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct RollbackItemReservationArgs { + pub rollback_token: u64, +} + +impl From for super::Reducer { + fn from(args: RollbackItemReservationArgs) -> Self { + Self::RollbackItemReservation { + rollback_token: args.rollback_token, + } + } +} + +impl __sdk::InModule for RollbackItemReservationArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `rollback_item_reservation`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait rollback_item_reservation { + /// Request that the remote module invoke the reducer `rollback_item_reservation` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. 
+ /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`rollback_item_reservation:rollback_item_reservation_then`] to run a callback after the reducer completes. + fn rollback_item_reservation(&self, rollback_token: u64) -> __sdk::Result<()> { + self.rollback_item_reservation_then(rollback_token, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `rollback_item_reservation` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn rollback_item_reservation_then( + &self, + rollback_token: u64, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl rollback_item_reservation for super::RemoteReducers { + fn rollback_item_reservation_then( + &self, + rollback_token: u64, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(RollbackItemReservationArgs { rollback_token }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/set_spacetimedb_uri_reducer.rs b/tools/tpcc-runner/src/module_bindings/set_spacetimedb_uri_reducer.rs new file mode 100644 index 00000000000..52266e982ff --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/set_spacetimedb_uri_reducer.rs @@ -0,0 +1,66 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct SetSpacetimedbUriArgs { + pub uri: String, +} + +impl From for super::Reducer { + fn from(args: SetSpacetimedbUriArgs) -> Self { + Self::SetSpacetimedbUri { uri: args.uri } + } +} + +impl __sdk::InModule for SetSpacetimedbUriArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `set_spacetimedb_uri`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait set_spacetimedb_uri { + /// Request that the remote module invoke the reducer `set_spacetimedb_uri` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`set_spacetimedb_uri:set_spacetimedb_uri_then`] to run a callback after the reducer completes. + fn set_spacetimedb_uri(&self, uri: String) -> __sdk::Result<()> { + self.set_spacetimedb_uri_then(uri, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `set_spacetimedb_uri` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn set_spacetimedb_uri_then( + &self, + uri: String, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl set_spacetimedb_uri for super::RemoteReducers { + fn set_spacetimedb_uri_then( + &self, + uri: String, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(SetSpacetimedbUriArgs { uri }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs b/tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs new file mode 100644 index 00000000000..036aef1b0d3 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs @@ -0,0 +1,45 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct SpacetimeDbUri { + pub uri: String, +} + +impl __sdk::InModule for SpacetimeDbUri { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `SpacetimeDbUri`. +/// +/// Provides typed access to columns for query building. +pub struct SpacetimeDbUriCols { + pub uri: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for SpacetimeDbUri { + type Cols = SpacetimeDbUriCols; + fn cols(table_name: &'static str) -> Self::Cols { + SpacetimeDbUriCols { + uri: __sdk::__query_builder::Col::new(table_name, "uri"), + } + } +} + +/// Indexed column accessor struct for the table `SpacetimeDbUri`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct SpacetimeDbUriIxCols {} + +impl __sdk::__query_builder::HasIxCols for SpacetimeDbUri { + type IxCols = SpacetimeDbUriIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + SpacetimeDbUriIxCols {} + } +} + +impl __sdk::__query_builder::CanBeLookupTable for SpacetimeDbUri {} diff --git a/tools/tpcc-runner/src/module_bindings/warehouse_type.rs b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs index 66f02971a4f..ed553b73aad 100644 --- a/tools/tpcc-runner/src/module_bindings/warehouse_type.rs +++ b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs @@ -16,6 +16,7 @@ pub struct Warehouse { pub w_zip: String, pub w_tax_bps: i32, pub w_ytd_cents: i64, + pub remote_database_home: Option<__sdk::Identity>, } impl __sdk::InModule for Warehouse { @@ -35,6 +36,7 @@ pub struct WarehouseCols { pub w_zip: __sdk::__query_builder::Col, pub w_tax_bps: __sdk::__query_builder::Col, pub w_ytd_cents: __sdk::__query_builder::Col, + pub remote_database_home: __sdk::__query_builder::Col>, } impl __sdk::__query_builder::HasCols for Warehouse { @@ -50,6 +52,7 @@ impl __sdk::__query_builder::HasCols for Warehouse { w_zip: __sdk::__query_builder::Col::new(table_name, "w_zip"), w_tax_bps: __sdk::__query_builder::Col::new(table_name, "w_tax_bps"), w_ytd_cents: __sdk::__query_builder::Col::new(table_name, "w_ytd_cents"), + remote_database_home: __sdk::__query_builder::Col::new(table_name, "remote_database_home"), } } } From 9d3086cd31f8816832c8cd45624469b99b75d959 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Fri, 27 Mar 2026 12:03:06 -0400 Subject: [PATCH 04/38] Reorganize module, make tests work --- modules/tpcc/src/lib.rs | 767 +++++----------------------------- modules/tpcc/src/new_order.rs | 598 ++++++++++++++++++++++++++ modules/tpcc/src/remote.rs | 41 ++ 3 files changed, 746 insertions(+), 660 deletions(-) create mode 100644 modules/tpcc/src/new_order.rs create mode 100644 modules/tpcc/src/remote.rs diff --git a/modules/tpcc/src/lib.rs 
b/modules/tpcc/src/lib.rs index 75d9d4895eb..b7265410dc4 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,33 +1,8 @@ -use http::Request; use spacetimedb::{ - procedure, reducer, sats::serde::SerdeWrapper, table, Identity, ProcedureContext, ReducerContext, ScheduleAt, - SpacetimeType, Table, Timestamp, TxContext, + procedure, reducer, table, Identity, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, }; use std::collections::BTreeSet; -const DISTRICTS_PER_WAREHOUSE: u8 = 10; -const CUSTOMERS_PER_DISTRICT: u32 = 3_000; -const ITEMS: u32 = 100_000; -const MAX_C_DATA_LEN: usize = 500; -const TAX_SCALE: i64 = 10_000; - -#[spacetimedb::table(accessor = spacetimedb_uri)] -struct SpacetimeDbUri { - uri: String, -} - -#[reducer] -fn set_spacetimedb_uri(ctx: &ReducerContext, uri: String) { - for row in ctx.db.spacetimedb_uri().iter() { - ctx.db.spacetimedb_uri().delete(row); - } - ctx.db.spacetimedb_uri().insert(SpacetimeDbUri { uri }); -} - -fn get_spacetimedb_uri(tx: &TxContext) -> String { - tx.db.spacetimedb_uri().iter().next().unwrap().uri -} - macro_rules! ensure { ($cond:expr, $($arg:tt)+) => { if !($cond) { @@ -36,6 +11,15 @@ macro_rules! 
ensure { }; } +mod new_order; +mod remote; + +const DISTRICTS_PER_WAREHOUSE: u8 = 10; +const CUSTOMERS_PER_DISTRICT: u32 = 3_000; +const ITEMS: u32 = 100_000; +const MAX_C_DATA_LEN: usize = 500; +const TAX_SCALE: i64 = 10_000; + #[derive(Clone, Debug, SpacetimeType)] pub enum CustomerSelector { ById(u32), @@ -44,39 +28,6 @@ pub enum CustomerSelector { type WarehouseId = u16; -#[derive(Clone, Debug, SpacetimeType)] -pub struct NewOrderLineInput { - pub item_id: u32, - pub supply_w_id: WarehouseId, - pub quantity: u32, -} - -#[derive(Clone, Debug, SpacetimeType)] -pub struct NewOrderLineResult { - pub item_id: u32, - pub item_name: String, - pub supply_w_id: WarehouseId, - pub quantity: u32, - pub stock_quantity: i32, - pub item_price_cents: i64, - pub amount_cents: i64, - pub brand_generic: String, -} - -#[derive(Clone, Debug, SpacetimeType)] -pub struct NewOrderResult { - pub warehouse_tax_bps: i32, - pub district_tax_bps: i32, - pub customer_discount_bps: i32, - pub customer_last: String, - pub customer_credit: String, - pub order_id: u32, - pub entry_d: Timestamp, - pub total_amount_cents: i64, - pub all_local: bool, - pub lines: Vec, -} - #[derive(Clone, Debug, SpacetimeType)] pub struct PaymentResult { pub warehouse_name: String, @@ -503,519 +454,6 @@ pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<() Ok(()) } -#[procedure] -pub fn new_order( - ctx: &mut ProcedureContext, - w_id: WarehouseId, - d_id: u8, - c_id: u32, - order_lines: Vec, -) -> Result { - ensure!( - (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), - "district id out of range" - ); - ensure!( - (5..=15).contains(&order_lines.len()), - "new-order requires between 5 and 15 order lines" - ); - - // Setup TX: validate warehouse, district, customer ID. - // NON-CONFORMANT: These never change in TPC-C, - // so we don't need to include the checks in the same transaction as the rest of the work. 
- let (warehouse, district, customer, spacetimedb_uri) = ctx.try_with_tx(|tx| { - let warehouse = find_warehouse(tx, w_id)?; - let district = find_district(tx, w_id, d_id)?; - let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; - let spacetimedb_uri = get_spacetimedb_uri(tx); - Ok::<_, String>((warehouse, district, customer, spacetimedb_uri)) - })?; - - let PartitionedItems { - local_database_items, - remote_database_items, - all_local_warehouse, - } = - // Look up all of the items in the order, and fail if any of them doesn't exist. - // If they all exist, sort them into two groups: - // - `local_database_items`, items in warehouses managed by this database. - // - `remote_database_items`, items in warehouses managed by remote databases. - // Also compute `all_local_warehouse`, which says if all of the items are in the warehouse `w_id`. - // NON-CONFORMANT: This is a separate transaction from the later one, - // which updates stock quantities for the local items and records the new order. - // In a real system, an item might change between the two, but none of the TPC-C transactions writes to items. - // We (ab)use this knowledge to skip compensating for writes to items. - partition_local_from_remote_database_items(ctx, w_id, &order_lines)?; - - // NON-CONFORMANT: We reserve items from the remote database extra-transactionally. - // If our TPC-C transaction fails, we'll roll back those reservations. - // This opens us up to dirty read isolation hazards, - // where a concurrent transaction may observe a change in stock quantity that later rolls back. - // This will never happen with only the TPC-C transactions, - // as stock quantity is only written by the `new_order` transaction, - // and `new_order` can only fail prior to updating the stock quantity, due to non-existent items. - // We (ab)use this knowledge to skip compensating for rollbacks to prevent dirty reads. 
- let remote_item_reservations = reserve_remote_items(ctx, &spacetimedb_uri, d_id, &remote_database_items)?; - - match ctx.try_with_tx(|tx| { - let district = tx - .db - .district() - .district_key() - .find(district.district_key) - .expect("District should not have been removed since we retrieved it last"); - let order_id = district.d_next_o_id; - tx.db.district().district_key().update(District { - d_next_o_id: order_id + 1, - ..district - }); - - let line_results = local_database_items - .iter() - .map(|local_item| claim_stock_for_local_database_item(tx, local_item, d_id)) - .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( - |(remote_item, reserved_item)| remote_item_to_processed_new_order_item(remote_item, reserved_item), - )) - .map(|processed_item| insert_order_line(tx, w_id, d_id, order_id, processed_item)) - .collect::>(); - - let subtotal_cents = line_results.iter().map(|line_result| line_result.amount_cents).sum(); - - let taxed = apply_tax( - subtotal_cents, - i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), - ); - let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); - - Ok(NewOrderResult { - warehouse_tax_bps: warehouse.w_tax_bps, - district_tax_bps: district.d_tax_bps, - customer_discount_bps: customer.c_discount_bps, - customer_last: customer.c_last.clone(), - customer_credit: customer.c_credit.clone(), - order_id, - entry_d: tx.timestamp, - total_amount_cents, - all_local: all_local_warehouse, - lines: line_results, - }) - }) { - Ok(result) => { - confirm_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - &remote_database_items, - remote_item_reservations, - ); - Ok(result) - } - Err(e) => { - rollback_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - &remote_database_items, - remote_item_reservations, - ); - Err(e) - } - } -} - -struct LocalDatabaseItem { - idx: usize, - line: NewOrderLineInput, - item: Item, - is_remote_warehouse: bool, -} - -struct 
RemoteDatabaseItem { - idx: usize, - line: NewOrderLineInput, - item: Item, - remote_database_identity: Identity, -} - -struct PartitionedItems { - local_database_items: Vec, - remote_database_items: Vec, - - /// Are all items from the same warehouse as the requesting terminal? - /// - /// Note that this may be false even if all items are partitioned into [`Self::local_database_items`], - /// as we may manage multiple warehouses with a single database. - all_local_warehouse: bool, -} - -fn partition_local_from_remote_database_items( - ctx: &mut ProcedureContext, - local_warehouse_id: WarehouseId, - order_lines: &[NewOrderLineInput], -) -> Result { - ctx.try_with_tx(|tx| { - let mut local_database_items: Vec = Vec::with_capacity(order_lines.len()); - let mut remote_database_items: Vec = Vec::with_capacity(order_lines.len()); - - // Whether this order applies only to a single warehouse. - // This may be `false` even when `remote_database_items_to_get` is non-empty, - // as we may run multiple warehouses from the same database. - let mut all_local_warehouse = true; - - for (idx, line) in order_lines.iter().enumerate() { - ensure!(line.quantity > 0, "order line quantity must be positive"); - - let is_remote_warehouse = line.supply_w_id == local_warehouse_id; - all_local_warehouse &= !is_remote_warehouse; - - let warehouse = tx - .db - .warehouse() - .w_id() - .find(line.supply_w_id) - .ok_or_else(|| format!("No such warehouse: {}", line.supply_w_id))?; - - // TECHNICALLY NON-CONFORMANT: If we encounter a non-existent item in the order, - // we'll short-circuit and exit here. - // TPC-C technically requires, in 2.4.2.3, that we still retrieve and process all the valid item numbers. - // This would be a horrendous pain to implement, so we won't. 
- // We don't do the things the spec tells us it doesn't want us to do, namely: - // - changing the execution of other steps - // - using a different type of transaction - // But we do skip inspecting some number of valid items and stocks. - let item = find_item(tx, line.item_id)?; - match warehouse.remote_database_home { - None => { - // Warehouse is local to this database. - // We'll actually "process" the items, i.e. decrement the stock and sum the order price, - // after we look up and process all the remote items. - local_database_items.push(LocalDatabaseItem { - idx, - line: line.clone(), - item, - is_remote_warehouse, - }); - } - Some(remote_database_identity) => { - // Warehouse is on another database; we'll have to do a remote request. - // This is *really* non-conformant. - // TODO(docs): link to blog post justifying this. - remote_database_items.push(RemoteDatabaseItem { - idx, - line: line.clone(), - item, - remote_database_identity, - }); - } - } - } - - Ok(PartitionedItems { - local_database_items, - remote_database_items, - all_local_warehouse, - }) - }) -} - -fn reserve_remote_items( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, - district_id: u8, - remote_database_items: &[RemoteDatabaseItem], -) -> Result, String> { - let mut remote_item_reservations: Vec = Vec::with_capacity(remote_database_items.len()); - - for RemoteDatabaseItem { - line, - remote_database_identity, - .. 
- } in remote_database_items - { - match call_remote_function( - ctx, - &spacetimedb_uri, - *remote_database_identity, - "reserve_item_for_remote_order", - vec![serde_json::json!(spacetimedb_sats::serde::SerdeWrapper( - ReserveItemInput { - line: NewOrderLineInput::clone(line), - district: district_id, - } - ))], - ) { - Err(e) => { - rollback_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - remote_database_items, - remote_item_reservations, - ); - return Err(format!("Error reserving remote item: {e}")); - } - Ok(body) => { - let body = body.into_string().expect("Body should be valid UTF-8"); - let res: SerdeWrapper> = - serde_json::from_str(&body).expect("Response does not conform to expected schema"); - match res.0 { - Err(e) => { - rollback_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - remote_database_items, - remote_item_reservations, - ); - return Err(format!("Error reserving remote item from database: {e}")); - } - Ok(output) => remote_item_reservations.push(output), - } - } - }; - } - - Ok(remote_item_reservations) -} - -fn call_remote_function( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, - database_ident: Identity, - function_name: &str, - arguments: Vec, -) -> Result { - let request = Request::builder() - .uri(format!( - "{spacetimedb_uri}/v1/database/{database_ident}/call/{function_name}" - )) - .method("POST") - // TODO(auth): include a token. 
- .body(serde_json::json!(arguments).to_string()) - .map_err(|e| format!("Error constructing `Request`: {e}"))?; - match ctx.http.send(request) { - Err(e) => Err(format!("Error sending request to remote database {database_ident} at URI {spacetimedb_uri} to call {function_name}: {e}")), - Ok(response) if response.status() != http::status::StatusCode::OK => Err(format!("Got non-200 response code {} from request to remote database {database_ident} at URI {spacetimedb_uri} when calling {function_name}: {}", response.status(), response.into_body().into_string_lossy())), - Ok(response) => Ok(response.into_body()), - } -} - -fn rollback_all_remote_item_reservations( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, - remote_items: &[RemoteDatabaseItem], - reservations: Vec, -) { - for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { - if let Err(e) = call_remote_function( - ctx, - spacetimedb_uri, - remote_item.remote_database_identity, - "rollback_item_reservation", - vec![serde_json::json!(reservation.rollback_token)], - ) { - log::error!("Error rollinb back item reservation: {e}"); - } - } -} - -fn confirm_all_remote_item_reservations( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, - remote_items: &[RemoteDatabaseItem], - reservations: Vec, -) { - for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { - if let Err(e) = call_remote_function( - ctx, - spacetimedb_uri, - remote_item.remote_database_identity, - "confirm_item_reservation", - vec![serde_json::json!(reservation.rollback_token)], - ) { - log::error!("Error confirming item reservation: {e}"); - } - } -} - -struct ProcessedNewOrderItem { - idx: usize, - line: NewOrderLineInput, - item: Item, - district_stock_info: String, - stock_data: String, - updated_quantity: i32, -} - -fn claim_stock_for_local_database_item( - tx: &TxContext, - local_item: &LocalDatabaseItem, - district_id: u8, -) -> ProcessedNewOrderItem { - let stock = 
- find_stock(tx, local_item.line.supply_w_id, local_item.line.item_id).expect("Stock should exist for all items"); - let updated_quantity = adjust_stock_quantity(stock.s_quantity, local_item.line.quantity as i32); - tx.db.stock().stock_key().update(Stock { - s_quantity: updated_quantity, - s_ytd: stock.s_ytd + local_item.line.quantity as u64, - s_order_cnt: stock.s_order_cnt + 1, - s_remote_cnt: stock.s_remote_cnt + u32::from(local_item.is_remote_warehouse), - ..stock.clone() - }); - - ProcessedNewOrderItem { - idx: local_item.idx, - line: local_item.line.clone(), - item: local_item.item.clone(), - district_stock_info: district_stock_info(&stock, district_id), - stock_data: stock.s_data.clone(), - updated_quantity, - } -} - -fn remote_item_to_processed_new_order_item( - remote_item: &RemoteDatabaseItem, - reserved_item: &ReserveItemOutput, -) -> ProcessedNewOrderItem { - ProcessedNewOrderItem { - idx: remote_item.idx, - line: remote_item.line.clone(), - item: remote_item.item.clone(), - district_stock_info: reserved_item.s_dist.clone(), - stock_data: reserved_item.s_data.clone(), - updated_quantity: reserved_item.updated_quantity, - } -} - -fn insert_order_line( - tx: &TxContext, - warehouse_id: WarehouseId, - district_id: u8, - order_id: u32, - processed_item: ProcessedNewOrderItem, -) -> NewOrderLineResult { - let ProcessedNewOrderItem { - idx, - line, - item, - district_stock_info, - stock_data, - updated_quantity, - } = processed_item; - let line_amount_cents = line.quantity as i64 * item.i_price_cents; - let brand_generic = if contains_original(&item.i_data) && contains_original(&stock_data) { - "B" - } else { - "G" - }; - tx.db.order_line().insert(OrderLine { - order_line_key: pack_order_line_key(warehouse_id, district_id, order_id, (idx + 1) as u8), - ol_w_id: warehouse_id, - ol_d_id: district_id, - ol_o_id: order_id, - ol_number: (idx + 1) as u8, - ol_i_id: line.item_id, - ol_supply_w_id: line.supply_w_id, - ol_delivery_d: None, - ol_quantity: 
line.quantity, - ol_amount_cents: line_amount_cents, - ol_dist_info: district_stock_info, - }); - - NewOrderLineResult { - item_id: item.i_id, - item_name: item.i_name, - supply_w_id: line.supply_w_id, - quantity: line.quantity, - stock_quantity: updated_quantity, - item_price_cents: item.i_price_cents, - amount_cents: line_amount_cents, - brand_generic: brand_generic.to_string(), - } -} - -#[derive(SpacetimeType)] -pub struct ReserveItemOutput { - s_dist: String, - s_data: String, - updated_quantity: i32, - rollback_token: u64, -} - -#[table(accessor = reserved_item_log)] -pub struct ReservedItemLog { - #[primary_key] - #[auto_inc] - rollback_token: u64, - line: NewOrderLineInput, -} - -#[derive(SpacetimeType)] -pub struct ReserveItemInput { - line: NewOrderLineInput, - district: u8, -} - -#[procedure] -pub fn reserve_item_for_remote_order( - ctx: &mut ProcedureContext, - input: ReserveItemInput, -) -> Result { - let ReserveItemInput { line, district } = input; - ctx.try_with_tx(|tx| { - let stock = find_stock(tx, line.supply_w_id, line.item_id)?; - - let ReservedItemLog { rollback_token, .. } = tx.db.reserved_item_log().insert(ReservedItemLog { - rollback_token: 0, - line: line.clone(), - }); - - let reserved_quantity = line.quantity; - let updated_quantity = adjust_stock_quantity(stock.s_quantity, reserved_quantity as i32); - - let reserved = ReserveItemOutput { - s_dist: district_stock_info(&stock, district), - s_data: stock.s_data.clone(), - updated_quantity, - rollback_token, - }; - - tx.db.stock().stock_key().update(Stock { - s_quantity: updated_quantity, - s_ytd: stock.s_ytd + u64::from(reserved_quantity), - s_order_cnt: stock.s_order_cnt + 1, - // This must be an order from a remote warehouse, it's coming from a whole different database. 
- s_remote_cnt: stock.s_remote_cnt + 1, - ..stock - }); - - Ok(reserved) - }) -} - -#[reducer] -pub fn rollback_item_reservation(ctx: &ReducerContext, rollback_token: u64) -> Result<(), String> { - let line = ctx - .db - .reserved_item_log() - .rollback_token() - .find(rollback_token) - .ok_or_else(|| format!("No such rollback token: {rollback_token}"))? - .line; - let stock = find_stock(ctx, line.supply_w_id, line.item_id)?; - let quantity = line.quantity; - ctx.db.stock().stock_key().update(Stock { - s_quantity: reverse_stock_quantity(stock.s_quantity, quantity as i32), - s_ytd: stock.s_ytd - line.quantity as u64, - s_order_cnt: stock.s_order_cnt - 1, - s_remote_cnt: stock.s_remote_cnt - 1, - ..stock - }); - ctx.db.reserved_item_log().rollback_token().delete(rollback_token); - Ok(()) -} - -#[reducer] -pub fn confirm_item_reservation(ctx: &ReducerContext, rollback_token: u64) { - ctx.db.reserved_item_log().rollback_token().delete(rollback_token); -} - #[procedure] pub fn payment( ctx: &mut ProcedureContext, @@ -1425,7 +863,7 @@ fn process_delivery_district( }); } - let customer = find_customer_by_id_reducer(ctx, w_id, d_id, order.o_c_id)?; + let customer = find_customer_by_id(ctx, w_id, d_id, order.o_c_id)?; ctx.db.customer().customer_key().update(Customer { c_balance_cents: customer.c_balance_cents + total_amount_cents, c_delivery_cnt: customer.c_delivery_cnt + 1, @@ -1477,7 +915,7 @@ fn find_district(tx: &spacetimedb::TxContext, w_id: u16, d_id: u8) -> Result Result { +fn find_customer_by_id(tx: &ReducerContext, w_id: u16, d_id: u8, c_id: u32) -> Result { tx.db .customer() .by_w_d_c_id() @@ -1486,23 +924,6 @@ fn find_customer_by_id(tx: &spacetimedb::TxContext, w_id: u16, d_id: u8, c_id: u .ok_or_else(|| format!("customer ({w_id}, {d_id}, {c_id}) not found")) } -fn find_customer_by_id_reducer(ctx: &ReducerContext, w_id: u16, d_id: u8, c_id: u32) -> Result { - ctx.db - .customer() - .by_w_d_c_id() - .filter((w_id, d_id, c_id)) - .next() - .ok_or_else(|| 
format!("customer ({w_id}, {d_id}, {c_id}) not found")) -} - -fn find_item(tx: &spacetimedb::TxContext, item_id: u32) -> Result { - tx.db - .item() - .i_id() - .find(item_id) - .ok_or_else(|| format!("item {item_id} not found")) -} - fn find_stock(tx: &ReducerContext, w_id: u16, item_id: u32) -> Result { tx.db .stock() @@ -1512,56 +933,6 @@ fn find_stock(tx: &ReducerContext, w_id: u16, item_id: u32) -> Result String { - match d_id { - 1 => stock.s_dist_01.clone(), - 2 => stock.s_dist_02.clone(), - 3 => stock.s_dist_03.clone(), - 4 => stock.s_dist_04.clone(), - 5 => stock.s_dist_05.clone(), - 6 => stock.s_dist_06.clone(), - 7 => stock.s_dist_07.clone(), - 8 => stock.s_dist_08.clone(), - 9 => stock.s_dist_09.clone(), - 10 => stock.s_dist_10.clone(), - _ => String::new(), - } -} - -fn contains_original(data: &str) -> bool { - data.contains("ORIGINAL") -} - -fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { - assert!(ordered_quantity >= 1); - assert!(ordered_quantity <= 10); - if current_quantity - ordered_quantity >= 10 { - current_quantity - ordered_quantity - } else { - current_quantity - ordered_quantity + 91 - } -} - -/// NON-CONFORMANT: we're abusing the fact that TPC-C updates stock quantities in a predictable way -/// which is both commutative and associative to be able to roll back stock reservations. 
-fn reverse_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { - assert!(ordered_quantity >= 1); - assert!(ordered_quantity <= 10); - if current_quantity + ordered_quantity >= 91 { - current_quantity + ordered_quantity - 91 - } else { - current_quantity + ordered_quantity - } -} - -fn apply_tax(amount_cents: i64, total_tax_bps: i64) -> i64 { - amount_cents * (TAX_SCALE + total_tax_bps) / TAX_SCALE -} - -fn apply_discount(amount_cents: i64, discount_bps: i64) -> i64 { - amount_cents * (TAX_SCALE - discount_bps) / TAX_SCALE -} - fn pack_district_key(w_id: u16, d_id: u8) -> u32 { (u32::from(w_id) * 100) + u32::from(d_id) } @@ -1578,10 +949,6 @@ fn pack_order_key(w_id: u16, d_id: u8, o_id: u32) -> u64 { ((u64::from(w_id) * 100) + u64::from(d_id)) * 10_000_000 + u64::from(o_id) } -fn pack_order_line_key(w_id: u16, d_id: u8, o_id: u32, ol_number: u8) -> u64 { - pack_order_key(w_id, d_id, o_id) * 100 + u64::from(ol_number) -} - fn as_delivery_completion_view(row: DeliveryCompletion) -> DeliveryCompletionView { DeliveryCompletionView { completion_id: row.completion_id, @@ -1598,25 +965,105 @@ fn as_delivery_completion_view(row: DeliveryCompletion) -> DeliveryCompletionVie } } -#[cfg(test)] -mod tests { +mod test { + use crate::new_order::{adjust_stock_quantity, pack_order_line_key}; + use super::*; - #[test] - fn middle_customer_selection_uses_lower_middle_for_even_count() { + #[procedure] + fn test(_ctx: &mut ProcedureContext) -> Result { + let mut errors = vec![]; + + macro_rules! test_fail { + ($reason:expr) => { + errors.push($reason); + }; + } + + #[allow(unused)] + macro_rules! test_assert { + ($test_name:literal, $condition:expr) => { + let condition = $condition; + if !condition { + test_fail!(format!( + "{}: {} returned false", + $test_name, + stringify!($condition) + )); + } + }; + } + + macro_rules! 
test_assert_eq { + ($test_name:literal, $lhs:expr, $rhs:expr) => { + let lhs = $lhs; + let rhs = $rhs; + let condition = lhs == rhs; + if !condition { + test_fail!(format!( + "{}: expected {} == {}, but got: +{} => {}, +{} => {}", + $test_name, + stringify!($lhs), + stringify!($rhs), + stringify!($lhs), + lhs, + stringify!($rhs), + rhs, + )); + } + }; + } + + macro_rules! test_assert_lt { + ($test_name:literal, $lhs:expr, $rhs:expr) => { + let lhs = $lhs; + let rhs = $rhs; + let condition = lhs < rhs; + if !condition { + test_fail!(format!( + "{}: expected {} < {}, but got: +{} => {}, +{} => {}", + $test_name, + stringify!($lhs), + stringify!($rhs), + stringify!($lhs), + lhs, + stringify!($rhs), + rhs, + )); + } + }; + } + let idx = (4usize - 1) / 2; - assert_eq!(idx, 1); - } + test_assert_eq!("middle_customer_selection_uses_lower_middle_for_even_count", idx, 1); - #[test] - fn stock_quantity_wraps_like_tpcc() { - assert_eq!(adjust_stock_quantity(20, 5), 15); - assert_eq!(adjust_stock_quantity(10, 5), 96); - } + test_assert_eq!("stock_quantity_wraps_like_tpcc", adjust_stock_quantity(20, 5), 15); + test_assert_eq!("stock_quantity_wraps_like_tpcc", adjust_stock_quantity(10, 5), 96); - #[test] - fn packing_roundtrips_expected_ranges() { - assert!(pack_customer_key(1, 1, 1) < pack_customer_key(1, 1, 2)); - assert!(pack_order_line_key(1, 1, 1, 1) < pack_order_line_key(1, 1, 1, 2)); + test_assert_lt!( + "packing_roundtrips_expected_ranges", + pack_customer_key(1, 1, 1), + pack_customer_key(1, 1, 2) + ); + test_assert_lt!( + "packing_roundtrips_expected_ranges", + pack_order_line_key(1, 1, 1, 1), + pack_order_line_key(1, 1, 1, 2) + ); + + if errors.is_empty() { + Ok("All tests passed.".to_string()) + } else { + let mut output = format!("Saw {} test failures:\n", errors.len()); + for error in errors { + output.push_str(&error); + output.push('\n'); + } + Err(output) + } } } diff --git a/modules/tpcc/src/new_order.rs b/modules/tpcc/src/new_order.rs new file mode 100644 
index 00000000000..db4b7c39b5c --- /dev/null +++ b/modules/tpcc/src/new_order.rs @@ -0,0 +1,598 @@ +use spacetimedb::{ + procedure, reducer, table, Identity, ProcedureContext, ReducerContext, SpacetimeType, Table, Timestamp, TxContext, +}; +use spacetimedb_sats::serde::SerdeWrapper; + +use crate::{ + district, find_customer_by_id, find_district, find_stock, find_warehouse, item, order_line, pack_order_key, + remote::{call_remote_function, get_spacetimedb_uri}, + stock, warehouse, District, Item, OrderLine, Stock, WarehouseId, DISTRICTS_PER_WAREHOUSE, TAX_SCALE, +}; + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderLineInput { + pub item_id: u32, + pub supply_w_id: WarehouseId, + pub quantity: u32, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderLineResult { + pub item_id: u32, + pub item_name: String, + pub supply_w_id: WarehouseId, + pub quantity: u32, + pub stock_quantity: i32, + pub item_price_cents: i64, + pub amount_cents: i64, + pub brand_generic: String, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderResult { + pub warehouse_tax_bps: i32, + pub district_tax_bps: i32, + pub customer_discount_bps: i32, + pub customer_last: String, + pub customer_credit: String, + pub order_id: u32, + pub entry_d: Timestamp, + pub total_amount_cents: i64, + pub all_local: bool, + pub lines: Vec, +} + +#[procedure] +pub fn new_order( + ctx: &mut ProcedureContext, + w_id: WarehouseId, + d_id: u8, + c_id: u32, + order_lines: Vec, +) -> Result { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), + "district id out of range" + ); + ensure!( + (5..=15).contains(&order_lines.len()), + "new-order requires between 5 and 15 order lines" + ); + + // Setup TX: validate warehouse, district, customer ID. + // NON-CONFORMANT: These never change in TPC-C, + // so we don't need to include the checks in the same transaction as the rest of the work. 
+ let (warehouse, district, customer, spacetimedb_uri) = ctx.try_with_tx(|tx| { + let warehouse = find_warehouse(tx, w_id)?; + let district = find_district(tx, w_id, d_id)?; + let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; + let spacetimedb_uri = get_spacetimedb_uri(tx); + Ok::<_, String>((warehouse, district, customer, spacetimedb_uri)) + })?; + + let PartitionedItems { + local_database_items, + remote_database_items, + all_local_warehouse, + } = + // Look up all of the items in the order, and fail if any of them doesn't exist. + // If they all exist, sort them into two groups: + // - `local_database_items`, items in warehouses managed by this database. + // - `remote_database_items`, items in warehouses managed by remote databases. + // Also compute `all_local_warehouse`, which says if all of the items are in the warehouse `w_id`. + // NON-CONFORMANT: This is a separate transaction from the later one, + // which updates stock quantities for the local items and records the new order. + // In a real system, an item might change between the two, but none of the TPC-C transactions writes to items. + // We (ab)use this knowledge to skip compensating for writes to items. + partition_local_from_remote_database_items(ctx, w_id, &order_lines)?; + + // NON-CONFORMANT: We reserve items from the remote database extra-transactionally. + // If our TPC-C transaction fails, we'll roll back those reservations. + // This opens us up to dirty read isolation hazards, + // where a concurrent transaction may observe a change in stock quantity that later rolls back. + // This will never happen with only the TPC-C transactions, + // as stock quantity is only written by the `new_order` transaction, + // and `new_order` can only fail prior to updating the stock quantity, due to non-existent items. + // We (ab)use this knowledge to skip compensating for rollbacks to prevent dirty reads. 
+ let remote_item_reservations = reserve_remote_items(ctx, &spacetimedb_uri, d_id, &remote_database_items)?; + + match ctx.try_with_tx(|tx| { + let district = tx + .db + .district() + .district_key() + .find(district.district_key) + .expect("District should not have been removed since we retrieved it last"); + let order_id = district.d_next_o_id; + tx.db.district().district_key().update(District { + d_next_o_id: order_id + 1, + ..district + }); + + let line_results = local_database_items + .iter() + .map(|local_item| claim_stock_for_local_database_item(tx, local_item, d_id)) + .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( + |(remote_item, reserved_item)| remote_item_to_processed_new_order_item(remote_item, reserved_item), + )) + .map(|processed_item| insert_order_line(tx, w_id, d_id, order_id, processed_item)) + .collect::>(); + + let subtotal_cents = line_results.iter().map(|line_result| line_result.amount_cents).sum(); + + let taxed = apply_tax( + subtotal_cents, + i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), + ); + let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); + + Ok(NewOrderResult { + warehouse_tax_bps: warehouse.w_tax_bps, + district_tax_bps: district.d_tax_bps, + customer_discount_bps: customer.c_discount_bps, + customer_last: customer.c_last.clone(), + customer_credit: customer.c_credit.clone(), + order_id, + entry_d: tx.timestamp, + total_amount_cents, + all_local: all_local_warehouse, + lines: line_results, + }) + }) { + Ok(result) => { + confirm_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + &remote_database_items, + remote_item_reservations, + ); + Ok(result) + } + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + &remote_database_items, + remote_item_reservations, + ); + Err(e) + } + } +} + +struct LocalDatabaseItem { + idx: usize, + line: NewOrderLineInput, + item: Item, + is_remote_warehouse: bool, +} + +struct 
RemoteDatabaseItem { + idx: usize, + line: NewOrderLineInput, + item: Item, + remote_database_identity: Identity, +} + +struct PartitionedItems { + local_database_items: Vec, + remote_database_items: Vec, + + /// Are all items from the same warehouse as the requesting terminal? + /// + /// Note that this may be false even if all items are partitioned into [`Self::local_database_items`], + /// as we may manage multiple warehouses with a single database. + all_local_warehouse: bool, +} + +fn partition_local_from_remote_database_items( + ctx: &mut ProcedureContext, + local_warehouse_id: WarehouseId, + order_lines: &[NewOrderLineInput], +) -> Result { + ctx.try_with_tx(|tx| { + let mut local_database_items: Vec = Vec::with_capacity(order_lines.len()); + let mut remote_database_items: Vec = Vec::with_capacity(order_lines.len()); + + // Whether this order applies only to a single warehouse. + // This may be `false` even when `remote_database_items_to_get` is non-empty, + // as we may run multiple warehouses from the same database. + let mut all_local_warehouse = true; + + for (idx, line) in order_lines.iter().enumerate() { + ensure!(line.quantity > 0, "order line quantity must be positive"); + + let is_remote_warehouse = line.supply_w_id != local_warehouse_id; + all_local_warehouse &= !is_remote_warehouse; + + let warehouse = tx + .db + .warehouse() + .w_id() + .find(line.supply_w_id) + .ok_or_else(|| format!("No such warehouse: {}", line.supply_w_id))?; + + // TECHNICALLY NON-CONFORMANT: If we encounter a non-existent item in the order, + // we'll short-circuit and exit here. + // TPC-C technically requires, in 2.4.2.3, that we still retrieve and process all the valid item numbers. + // This would be a horrendous pain to implement, so we won't.
+ // We don't do the things the spec tells us it doesn't want us to do, namely: + // - changing the execution of other steps + // - using a different type of transaction + // But we do skip inspecting some number of valid items and stocks. + let item = find_item(tx, line.item_id)?; + match warehouse.remote_database_home { + None => { + // Warehouse is local to this database. + // We'll actually "process" the items, i.e. decrement the stock and sum the order price, + // after we look up and process all the remote items. + local_database_items.push(LocalDatabaseItem { + idx, + line: line.clone(), + item, + is_remote_warehouse, + }); + } + Some(remote_database_identity) => { + // Warehouse is on another database; we'll have to do a remote request. + // This is *really* non-conformant. + // TODO(docs): link to blog post justifying this. + remote_database_items.push(RemoteDatabaseItem { + idx, + line: line.clone(), + item, + remote_database_identity, + }); + } + } + } + + Ok(PartitionedItems { + local_database_items, + remote_database_items, + all_local_warehouse, + }) + }) +} + +fn reserve_remote_items( + ctx: &mut ProcedureContext, + spacetimedb_uri: &str, + district_id: u8, + remote_database_items: &[RemoteDatabaseItem], +) -> Result, String> { + let mut remote_item_reservations: Vec = Vec::with_capacity(remote_database_items.len()); + + for RemoteDatabaseItem { + line, + remote_database_identity, + .. 
} in remote_database_items + { + match call_remote_function( + ctx, + &spacetimedb_uri, + *remote_database_identity, + "reserve_item_for_remote_order", + vec![serde_json::json!(spacetimedb_sats::serde::SerdeWrapper( + ReserveItemInput { + line: NewOrderLineInput::clone(line), + district: district_id, + } + ))], + ) { + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + remote_database_items, + remote_item_reservations, + ); + return Err(format!("Error reserving remote item: {e}")); + } + Ok(body) => { + let body = body.into_string().expect("Body should be valid UTF-8"); + let res: SerdeWrapper> = + serde_json::from_str(&body).expect("Response does not conform to expected schema"); + match res.0 { + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + remote_database_items, + remote_item_reservations, + ); + return Err(format!("Error reserving remote item from database: {e}")); + } + Ok(output) => remote_item_reservations.push(output), + } + } + }; + } + + Ok(remote_item_reservations) +} + +fn rollback_all_remote_item_reservations( + ctx: &mut ProcedureContext, + spacetimedb_uri: &str, + remote_items: &[RemoteDatabaseItem], + reservations: Vec, +) { + for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { + if let Err(e) = call_remote_function( + ctx, + spacetimedb_uri, + remote_item.remote_database_identity, + "rollback_item_reservation", + vec![serde_json::json!(reservation.rollback_token)], + ) { + log::error!("Error rolling back item reservation: {e}"); + } + } +} + +fn confirm_all_remote_item_reservations( + ctx: &mut ProcedureContext, + spacetimedb_uri: &str, + remote_items: &[RemoteDatabaseItem], + reservations: Vec, +) { + for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { + if let Err(e) = call_remote_function( + ctx, + spacetimedb_uri, + remote_item.remote_database_identity, + "confirm_item_reservation", +
vec![serde_json::json!(reservation.rollback_token)], + ) { + log::error!("Error confirming item reservation: {e}"); + } + } +} + +struct ProcessedNewOrderItem { + idx: usize, + line: NewOrderLineInput, + item: Item, + district_stock_info: String, + stock_data: String, + updated_quantity: i32, +} + +fn claim_stock_for_local_database_item( + tx: &TxContext, + local_item: &LocalDatabaseItem, + district_id: u8, +) -> ProcessedNewOrderItem { + let stock = + find_stock(tx, local_item.line.supply_w_id, local_item.line.item_id).expect("Stock should exist for all items"); + let updated_quantity = adjust_stock_quantity(stock.s_quantity, local_item.line.quantity as i32); + tx.db.stock().stock_key().update(Stock { + s_quantity: updated_quantity, + s_ytd: stock.s_ytd + local_item.line.quantity as u64, + s_order_cnt: stock.s_order_cnt + 1, + s_remote_cnt: stock.s_remote_cnt + u32::from(local_item.is_remote_warehouse), + ..stock.clone() + }); + + ProcessedNewOrderItem { + idx: local_item.idx, + line: local_item.line.clone(), + item: local_item.item.clone(), + district_stock_info: district_stock_info(&stock, district_id), + stock_data: stock.s_data.clone(), + updated_quantity, + } +} + +fn remote_item_to_processed_new_order_item( + remote_item: &RemoteDatabaseItem, + reserved_item: &ReserveItemOutput, +) -> ProcessedNewOrderItem { + ProcessedNewOrderItem { + idx: remote_item.idx, + line: remote_item.line.clone(), + item: remote_item.item.clone(), + district_stock_info: reserved_item.s_dist.clone(), + stock_data: reserved_item.s_data.clone(), + updated_quantity: reserved_item.updated_quantity, + } +} + +fn insert_order_line( + tx: &TxContext, + warehouse_id: WarehouseId, + district_id: u8, + order_id: u32, + processed_item: ProcessedNewOrderItem, +) -> NewOrderLineResult { + let ProcessedNewOrderItem { + idx, + line, + item, + district_stock_info, + stock_data, + updated_quantity, + } = processed_item; + let line_amount_cents = line.quantity as i64 * item.i_price_cents; + let 
brand_generic = if contains_original(&item.i_data) && contains_original(&stock_data) { + "B" + } else { + "G" + }; + tx.db.order_line().insert(OrderLine { + order_line_key: pack_order_line_key(warehouse_id, district_id, order_id, (idx + 1) as u8), + ol_w_id: warehouse_id, + ol_d_id: district_id, + ol_o_id: order_id, + ol_number: (idx + 1) as u8, + ol_i_id: line.item_id, + ol_supply_w_id: line.supply_w_id, + ol_delivery_d: None, + ol_quantity: line.quantity, + ol_amount_cents: line_amount_cents, + ol_dist_info: district_stock_info, + }); + + NewOrderLineResult { + item_id: item.i_id, + item_name: item.i_name, + supply_w_id: line.supply_w_id, + quantity: line.quantity, + stock_quantity: updated_quantity, + item_price_cents: item.i_price_cents, + amount_cents: line_amount_cents, + brand_generic: brand_generic.to_string(), + } +} + +#[derive(SpacetimeType)] +pub struct ReserveItemOutput { + s_dist: String, + s_data: String, + updated_quantity: i32, + rollback_token: u64, +} + +#[table(accessor = reserved_item_log)] +pub struct ReservedItemLog { + #[primary_key] + #[auto_inc] + rollback_token: u64, + line: NewOrderLineInput, +} + +#[derive(SpacetimeType)] +pub struct ReserveItemInput { + line: NewOrderLineInput, + district: u8, +} + +#[procedure] +pub fn reserve_item_for_remote_order( + ctx: &mut ProcedureContext, + input: ReserveItemInput, +) -> Result { + let ReserveItemInput { line, district } = input; + ctx.try_with_tx(|tx| { + let stock = find_stock(tx, line.supply_w_id, line.item_id)?; + + let ReservedItemLog { rollback_token, .. 
} = tx.db.reserved_item_log().insert(ReservedItemLog { + rollback_token: 0, + line: line.clone(), + }); + + let reserved_quantity = line.quantity; + let updated_quantity = adjust_stock_quantity(stock.s_quantity, reserved_quantity as i32); + + let reserved = ReserveItemOutput { + s_dist: district_stock_info(&stock, district), + s_data: stock.s_data.clone(), + updated_quantity, + rollback_token, + }; + + tx.db.stock().stock_key().update(Stock { + s_quantity: updated_quantity, + s_ytd: stock.s_ytd + u64::from(reserved_quantity), + s_order_cnt: stock.s_order_cnt + 1, + // This must be an order from a remote warehouse, it's coming from a whole different database. + s_remote_cnt: stock.s_remote_cnt + 1, + ..stock + }); + + Ok(reserved) + }) +} + +#[reducer] +pub fn rollback_item_reservation(ctx: &ReducerContext, rollback_token: u64) -> Result<(), String> { + let line = ctx + .db + .reserved_item_log() + .rollback_token() + .find(rollback_token) + .ok_or_else(|| format!("No such rollback token: {rollback_token}"))? 
+ .line; + let stock = find_stock(ctx, line.supply_w_id, line.item_id)?; + let quantity = line.quantity; + ctx.db.stock().stock_key().update(Stock { + s_quantity: reverse_stock_quantity(stock.s_quantity, quantity as i32), + s_ytd: stock.s_ytd - line.quantity as u64, + s_order_cnt: stock.s_order_cnt - 1, + s_remote_cnt: stock.s_remote_cnt - 1, + ..stock + }); + ctx.db.reserved_item_log().rollback_token().delete(rollback_token); + Ok(()) +} + +#[reducer] +pub fn confirm_item_reservation(ctx: &ReducerContext, rollback_token: u64) { + ctx.db.reserved_item_log().rollback_token().delete(rollback_token); +} + +fn apply_tax(amount_cents: i64, total_tax_bps: i64) -> i64 { + amount_cents * (TAX_SCALE + total_tax_bps) / TAX_SCALE +} + +fn apply_discount(amount_cents: i64, discount_bps: i64) -> i64 { + amount_cents * (TAX_SCALE - discount_bps) / TAX_SCALE +} + +fn find_item(tx: &spacetimedb::TxContext, item_id: u32) -> Result { + tx.db + .item() + .i_id() + .find(item_id) + .ok_or_else(|| format!("item {item_id} not found")) +} + +// public for test in lib.rs +pub fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { + assert!(ordered_quantity >= 1); + assert!(ordered_quantity <= 10); + if current_quantity - ordered_quantity >= 10 { + current_quantity - ordered_quantity + } else { + current_quantity - ordered_quantity + 91 + } +} + +/// NON-CONFORMANT: we're abusing the fact that TPC-C updates stock quantities in a predictable way +/// which is both commutative and associative to be able to roll back stock reservations. 
+fn reverse_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { + assert!(ordered_quantity >= 1); + assert!(ordered_quantity <= 10); + if current_quantity + ordered_quantity >= 91 { + current_quantity + ordered_quantity - 91 + } else { + current_quantity + ordered_quantity + } +} + +fn district_stock_info(stock: &Stock, d_id: u8) -> String { + match d_id { + 1 => stock.s_dist_01.clone(), + 2 => stock.s_dist_02.clone(), + 3 => stock.s_dist_03.clone(), + 4 => stock.s_dist_04.clone(), + 5 => stock.s_dist_05.clone(), + 6 => stock.s_dist_06.clone(), + 7 => stock.s_dist_07.clone(), + 8 => stock.s_dist_08.clone(), + 9 => stock.s_dist_09.clone(), + 10 => stock.s_dist_10.clone(), + _ => String::new(), + } +} + +fn contains_original(data: &str) -> bool { + data.contains("ORIGINAL") +} + +// public for test in lib.rs +pub fn pack_order_line_key(w_id: u16, d_id: u8, o_id: u32, ol_number: u8) -> u64 { + pack_order_key(w_id, d_id, o_id) * 100 + u64::from(ol_number) +} diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs new file mode 100644 index 00000000000..372005e8fb1 --- /dev/null +++ b/modules/tpcc/src/remote.rs @@ -0,0 +1,41 @@ +use http::Request; +use spacetimedb::{reducer, table, Identity, ProcedureContext, ReducerContext, Table, TxContext}; + +#[table(accessor = spacetimedb_uri)] +struct SpacetimeDbUri { + uri: String, +} + +#[reducer] +fn set_spacetimedb_uri(ctx: &ReducerContext, uri: String) { + for row in ctx.db.spacetimedb_uri().iter() { + ctx.db.spacetimedb_uri().delete(row); + } + ctx.db.spacetimedb_uri().insert(SpacetimeDbUri { uri }); +} + +pub fn get_spacetimedb_uri(tx: &TxContext) -> String { + tx.db.spacetimedb_uri().iter().next().unwrap().uri +} + +pub fn call_remote_function( + ctx: &mut ProcedureContext, + spacetimedb_uri: &str, + database_ident: Identity, + function_name: &str, + arguments: Vec, +) -> Result { + let request = Request::builder() + .uri(format!( + 
"{spacetimedb_uri}/v1/database/{database_ident}/call/{function_name}" + )) + .method("POST") + // TODO(auth): include a token. + .body(serde_json::json!(arguments).to_string()) + .map_err(|e| format!("Error constructing `Request`: {e}"))?; + match ctx.http.send(request) { + Err(e) => Err(format!("Error sending request to remote database {database_ident} at URI {spacetimedb_uri} to call {function_name}: {e}")), + Ok(response) if response.status() != http::status::StatusCode::OK => Err(format!("Got non-200 response code {} from request to remote database {database_ident} at URI {spacetimedb_uri} when calling {function_name}: {}", response.status(), response.into_body().into_string_lossy())), + Ok(response) => Ok(response.into_body()), + } +} From 8203605e2507ecb620034efcc5548cc708cbe587 Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 01:12:39 +0530 Subject: [PATCH 05/38] abi --- crates/bindings-sys/src/lib.rs | 33 ++++++++ crates/client-api/src/routes/database.rs | 22 +++-- crates/core/src/host/host_controller.rs | 17 +++- crates/core/src/host/instance_env.rs | 45 +++++++++- crates/core/src/host/mod.rs | 2 + crates/core/src/host/wasm_common.rs | 4 + .../src/host/wasmtime/wasm_instance_env.rs | 84 ++++++++++++++++++- .../core/src/host/wasmtime/wasmtime_module.rs | 2 +- crates/core/src/replica_context.rs | 46 ++++++++++ 9 files changed, 243 insertions(+), 12 deletions(-) diff --git a/crates/bindings-sys/src/lib.rs b/crates/bindings-sys/src/lib.rs index 95dfbc7e600..79835e120a3 100644 --- a/crates/bindings-sys/src/lib.rs +++ b/crates/bindings-sys/src/lib.rs @@ -865,6 +865,39 @@ pub mod raw { ) -> u16; } + #[link(wasm_import_module = "spacetime_10.5")] + unsafe extern "C" { + /// Call a reducer on another SpacetimeDB database via the local reverse proxy at `localhost:80`. + /// + /// - `identity_ptr` must point to exactly 32 bytes — the BSATN (little-endian) encoding of + /// the target database `Identity`. 
+ /// - `reducer_ptr[..reducer_len]` is the UTF-8 name of the reducer to call. + /// - `args_ptr[..args_len]` is the BSATN-encoded reducer arguments. + /// + /// On transport success (any HTTP response received): + /// - Returns the HTTP status code (e.g. 200, 400, 530). + /// - Writes a [`BytesSource`] containing the response body bytes to `*out`. + /// + /// On transport failure (connection refused, timeout, etc.): + /// - Returns [`errno::HTTP_ERROR`] (21). + /// - Writes a [`BytesSource`] containing a BSATN-encoded error [`String`] to `*out`. + /// + /// Unlike `procedure_http_request`, this syscall may be called while a transaction + /// is open (i.e. from within a reducer body). + /// + /// # Traps + /// + /// Traps if any pointer is NULL or its range falls outside of linear memory. + pub fn call_reducer_on_db( + identity_ptr: *const u8, // exactly 32 bytes, BSATN-encoded Identity + reducer_ptr: *const u8, + reducer_len: u32, + args_ptr: *const u8, + args_len: u32, + out: *mut BytesSource, + ) -> u16; + } + /// What strategy does the database index use? /// /// See also: diff --git a/crates/client-api/src/routes/database.rs b/crates/client-api/src/routes/database.rs index 6b753a9c8fd..881abcc0e18 100644 --- a/crates/client-api/src/routes/database.rs +++ b/crates/client-api/src/routes/database.rs @@ -136,13 +136,11 @@ pub async fn call( reducer, }): Path, TypedHeader(content_type): TypedHeader, - ByteStringBody(body): ByteStringBody, + body: Bytes, ) -> axum::response::Result { - assert_content_type_json(content_type)?; - let caller_identity = auth.claims.identity; - let args = FunctionArgs::Json(body); + let args = parse_call_args(content_type, body)?; // HTTP callers always need a connection ID to provide to connect/disconnect, // so generate one. 
@@ -216,11 +214,19 @@ pub async fn call( } } -fn assert_content_type_json(content_type: headers::ContentType) -> axum::response::Result<()> { - if content_type != headers::ContentType::json() { - Err(axum::extract::rejection::MissingJsonContentType::default().into()) +/// Parse call arguments from an HTTP body based on content type. +/// +/// - `application/json` → [`FunctionArgs::Json`] (UTF-8 required). +/// - `application/octet-stream` → [`FunctionArgs::Bsatn`] (raw BSATN bytes). +fn parse_call_args(content_type: headers::ContentType, body: Bytes) -> axum::response::Result { + if content_type == headers::ContentType::json() { + let s = bytestring::ByteString::try_from(body) + .map_err(|_| (StatusCode::BAD_REQUEST, "request body is not valid UTF-8").into_response())?; + Ok(FunctionArgs::Json(s)) + } else if content_type == headers::ContentType::from(mime::APPLICATION_OCTET_STREAM) { + Ok(FunctionArgs::Bsatn(body)) } else { - Ok(()) + Err(axum::extract::rejection::MissingJsonContentType::default().into()) } } diff --git a/crates/core/src/host/host_controller.rs b/crates/core/src/host/host_controller.rs index e67e67540eb..a813a1cb426 100644 --- a/crates/core/src/host/host_controller.rs +++ b/crates/core/src/host/host_controller.rs @@ -13,7 +13,7 @@ use crate::host::v8::V8Runtime; use crate::host::ProcedureCallError; use crate::messages::control_db::{Database, HostType}; use crate::module_host_context::ModuleCreationContext; -use crate::replica_context::ReplicaContext; +use crate::replica_context::{CallReducerOnDbConfig, ReplicaContext}; use crate::subscription::module_subscription_actor::ModuleSubscriptions; use crate::subscription::module_subscription_manager::{spawn_send_worker, SubscriptionManager, TransactionOffset}; use crate::subscription::row_list_builder_pool::BsatnRowListBuilderPool; @@ -117,6 +117,11 @@ pub struct HostController { db_cores: JobCores, /// The pool of buffers used to build `BsatnRowList`s in subscriptions. 
pub bsatn_rlb_pool: BsatnRowListBuilderPool, + /// Warmed HTTP/2 client shared by all replicas on this host for + /// [`crate::host::instance_env::InstanceEnv::call_reducer_on_db`]. + /// + /// All per-replica clones share the same underlying connection pool. + pub call_reducer_client: reqwest::Client, } pub(crate) struct HostRuntimes { @@ -228,6 +233,7 @@ impl HostController { page_pool: PagePool::new(default_config.page_pool_max_size), bsatn_rlb_pool: BsatnRowListBuilderPool::new(), db_cores, + call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), } } @@ -664,6 +670,7 @@ async fn make_replica_ctx( replica_id: u64, relational_db: Arc, bsatn_rlb_pool: BsatnRowListBuilderPool, + call_reducer_client: reqwest::Client, ) -> anyhow::Result { let logger = match module_logs { Some(path) => asyncify(move || Arc::new(DatabaseLogger::open_today(path))).await, @@ -696,6 +703,7 @@ async fn make_replica_ctx( replica_id, logger, subscriptions, + call_reducer_client, }) } @@ -771,6 +779,7 @@ struct ModuleLauncher { runtimes: Arc, core: AllocatedJobCore, bsatn_rlb_pool: BsatnRowListBuilderPool, + call_reducer_client: reqwest::Client, } impl ModuleLauncher { @@ -790,6 +799,7 @@ impl ModuleLauncher { self.replica_id, self.relational_db, self.bsatn_rlb_pool, + self.call_reducer_client, ) .await .map(Arc::new)?; @@ -991,6 +1001,7 @@ impl Host { runtimes: runtimes.clone(), core: host_controller.db_cores.take(), bsatn_rlb_pool: bsatn_rlb_pool.clone(), + call_reducer_client: host_controller.call_reducer_client.clone(), } .launch_module() .await? 
@@ -1020,6 +1031,7 @@ impl Host { runtimes: runtimes.clone(), core: host_controller.db_cores.take(), bsatn_rlb_pool: bsatn_rlb_pool.clone(), + call_reducer_client: host_controller.call_reducer_client.clone(), } .launch_module() .await; @@ -1043,6 +1055,7 @@ impl Host { runtimes: runtimes.clone(), core: host_controller.db_cores.take(), bsatn_rlb_pool: bsatn_rlb_pool.clone(), + call_reducer_client: host_controller.call_reducer_client.clone(), } .launch_module() .await; @@ -1150,6 +1163,8 @@ impl Host { runtimes: runtimes.clone(), core, bsatn_rlb_pool, + // Transient validation-only module; build its own client with defaults. + call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), } .launch_module() .await diff --git a/crates/core/src/host/instance_env.rs b/crates/core/src/host/instance_env.rs index 311c34775fd..641e1692485 100644 --- a/crates/core/src/host/instance_env.rs +++ b/crates/core/src/host/instance_env.rs @@ -977,6 +977,48 @@ impl InstanceEnv { Ok((response, body)) }) } + + /// Call a reducer on a remote database via the local reverse proxy (`localhost:80`). + /// + /// Unlike [`Self::http_request`], this is explicitly allowed while a transaction is open — + /// the caller is responsible for understanding the consistency implications. + /// + /// Uses the warmed HTTP/2 client stored in [`ReplicaContext::call_reducer_client`], + /// configured when the replica was constructed. + /// + /// Returns `(http_status, response_body)` on transport success, + /// or [`NodesError::HttpError`] if the connection itself fails. 
+ pub fn call_reducer_on_db( + &self, + database_identity: Identity, + reducer_name: &str, + args: bytes::Bytes, + ) -> impl Future> + use<> { + let client = self.replica_ctx.call_reducer_client.clone(); + let url = format!( + "http://localhost/v1/database/{}/call/{}", + database_identity.to_hex(), + reducer_name, + ); + + async move { + let response = client + .post(&url) + .header(http::header::CONTENT_TYPE, "application/octet-stream") + .body(args) + .send() + .await + .map_err(|e| NodesError::HttpError(e.to_string()))?; + + let status = response.status().as_u16(); + let body = response + .bytes() + .await + .map_err(|e| NodesError::HttpError(e.to_string()))?; + + Ok((status, body)) + } + } } /// Default timeout for HTTP requests performed by [`InstanceEnv::http_request`]. @@ -1317,7 +1359,7 @@ mod test { }, host::Scheduler, messages::control_db::{Database, HostType}, - replica_context::ReplicaContext, + replica_context::{CallReducerOnDbConfig, ReplicaContext}, subscription::module_subscription_actor::ModuleSubscriptions, }; use anyhow::{anyhow, Result}; @@ -1351,6 +1393,7 @@ mod test { replica_id: 0, logger, subscriptions: subs, + call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), }, runtime, )) diff --git a/crates/core/src/host/mod.rs b/crates/core/src/host/mod.rs index 0daa9c359bc..3123913fcf6 100644 --- a/crates/core/src/host/mod.rs +++ b/crates/core/src/host/mod.rs @@ -194,4 +194,6 @@ pub enum AbiCall { ProcedureCommitMutTransaction, ProcedureAbortMutTransaction, ProcedureHttpRequest, + + CallReducerOnDb, } diff --git a/crates/core/src/host/wasm_common.rs b/crates/core/src/host/wasm_common.rs index a5c737d54d6..2c0c5fa57e6 100644 --- a/crates/core/src/host/wasm_common.rs +++ b/crates/core/src/host/wasm_common.rs @@ -436,6 +436,10 @@ macro_rules! abi_funcs { $link_async! 
{ "spacetime_10.3"::procedure_sleep_until, "spacetime_10.3"::procedure_http_request, + + // Call a reducer on another database while holding (or not holding) a transaction. + // Uses a warmed HTTP/2 connection pool to localhost:80. + "spacetime_10.5"::call_reducer_on_db, } }; } diff --git a/crates/core/src/host/wasmtime/wasm_instance_env.rs b/crates/core/src/host/wasmtime/wasm_instance_env.rs index 74a57b35e92..868aa8b2ea5 100644 --- a/crates/core/src/host/wasmtime/wasm_instance_env.rs +++ b/crates/core/src/host/wasmtime/wasm_instance_env.rs @@ -17,7 +17,7 @@ use crate::subscription::module_subscription_manager::TransactionOffset; use anyhow::{anyhow, Context as _}; use spacetimedb_data_structures::map::IntMap; use spacetimedb_datastore::locking_tx_datastore::{FuncCallType, MutTxId, ViewCallInfo}; -use spacetimedb_lib::{bsatn, ConnectionId, Timestamp}; +use spacetimedb_lib::{bsatn, ConnectionId, Identity, Timestamp}; use spacetimedb_primitives::errno::HOST_CALL_FAILURE; use spacetimedb_primitives::{errno, ColId}; use spacetimedb_schema::def::ModuleDef; @@ -1943,6 +1943,88 @@ impl WasmInstanceEnv { ) }) } + + /// Call a reducer on another SpacetimeDB database via the local reverse proxy at `localhost:80`. + /// + /// - `identity_ptr` must point to exactly 32 bytes — the BSATN (little-endian) encoding of the + /// target [`Identity`]. + /// - `reducer_ptr[..reducer_len]` is the UTF-8 name of the reducer to call. + /// - `args_ptr[..args_len]` is the BSATN-encoded reducer arguments. + /// + /// On transport success (any HTTP response received from the server): + /// - Returns the HTTP status code (e.g. 200, 400, 530). + /// - Writes a [`BytesSource`] containing the response body bytes to `*out`. + /// + /// On transport failure (connection refused, timeout, etc.): + /// - Returns [`errno::HTTP_ERROR`]. + /// - Writes a [`BytesSource`] containing a BSATN-encoded error [`String`] to `*out`. 
+ /// + /// Unlike [`Self::procedure_http_request`], this ABI may be called while holding + /// an open transaction (i.e. from within a reducer body). + /// + /// # Traps + /// + /// Traps if any pointer is NULL or its range falls outside of linear memory. + pub fn call_reducer_on_db<'caller>( + caller: Caller<'caller, Self>, + (identity_ptr, reducer_ptr, reducer_len, args_ptr, args_len, out): ( + WasmPtr, + WasmPtr, + u32, + WasmPtr, + u32, + WasmPtr, + ), + ) -> Fut<'caller, RtResult> { + Self::async_with_span(caller, AbiCall::CallReducerOnDb, move |mut caller| async move { + let (mem, env) = Self::mem_env(&mut caller); + + #[allow(clippy::redundant_closure_call)] + let res = (async move || { + // Read the 32-byte BSATN-encoded Identity (little-endian). + let identity_slice = mem.deref_slice(identity_ptr, 32)?; + let identity_bytes: [u8; 32] = identity_slice + .try_into() + .expect("deref_slice(ptr, 32) always yields exactly 32 bytes"); + let database_identity = Identity::from_byte_array(identity_bytes); + + // Read the reducer name as a UTF-8 string. + let reducer_name = mem.deref_str(reducer_ptr, reducer_len)?; + + // Read the BSATN-encoded args as raw bytes. 
+ let args_buf = mem.deref_slice(args_ptr, args_len)?; + let args = bytes::Bytes::copy_from_slice(args_buf); + + let result = env + .instance_env + .call_reducer_on_db(database_identity, reducer_name, args) + .await; + + match result { + Ok((status, body)) => { + let bytes_source = WasmInstanceEnv::create_bytes_source(env, body)?; + bytes_source.0.write_to(mem, out)?; + Ok(status as u32) + } + Err(NodesError::HttpError(err)) => { + let err_bytes = bsatn::to_vec(&err).with_context(|| { + format!("Failed to BSATN-serialize call_reducer_on_db transport error: {err:?}") + })?; + let bytes_source = WasmInstanceEnv::create_bytes_source(env, err_bytes.into())?; + bytes_source.0.write_to(mem, out)?; + Ok(errno::HTTP_ERROR.get() as u32) + } + Err(e) => Err(WasmError::Db(e)), + } + })() + .await; + + ( + caller, + res.or_else(|err| Self::convert_wasm_result(AbiCall::CallReducerOnDb, err)), + ) + }) + } } type Fut<'caller, T> = Box>; diff --git a/crates/core/src/host/wasmtime/wasmtime_module.rs b/crates/core/src/host/wasmtime/wasmtime_module.rs index 48ac0fe80e2..0690120eb16 100644 --- a/crates/core/src/host/wasmtime/wasmtime_module.rs +++ b/crates/core/src/host/wasmtime/wasmtime_module.rs @@ -50,7 +50,7 @@ impl WasmtimeModule { WasmtimeModule { module } } - pub const IMPLEMENTED_ABI: abi::VersionTuple = abi::VersionTuple::new(10, 4); + pub const IMPLEMENTED_ABI: abi::VersionTuple = abi::VersionTuple::new(10, 5); pub(super) fn link_imports(linker: &mut Linker) -> anyhow::Result<()> { const { assert!(WasmtimeModule::IMPLEMENTED_ABI.major == spacetimedb_lib::MODULE_ABI_MAJOR_VERSION) }; diff --git a/crates/core/src/replica_context.rs b/crates/core/src/replica_context.rs index 731288bb459..ede7fa44cdd 100644 --- a/crates/core/src/replica_context.rs +++ b/crates/core/src/replica_context.rs @@ -8,9 +8,37 @@ use crate::subscription::module_subscription_actor::ModuleSubscriptions; use std::io; use std::ops::Deref; use std::sync::Arc; +use std::time::Duration; pub type Result = 
anyhow::Result; +/// Configuration for the HTTP/2 client used to call reducers on remote databases. +/// +/// Pass to [`ReplicaContext::new_call_reducer_client`] or supply directly when +/// constructing [`ReplicaContext`]. +#[derive(Debug, Clone)] +pub struct CallReducerOnDbConfig { + /// How long idle connections are held open. Default: 90 s. + pub pool_idle_timeout: Duration, + /// Max idle connections per host. Default: 8. + pub pool_max_idle_per_host: usize, + /// TCP keepalive sent to the OS. Default: 20 s. + pub tcp_keepalive: Duration, + /// Per-request timeout. Default: 30 s. + pub request_timeout: Duration, +} + +impl Default for CallReducerOnDbConfig { + fn default() -> Self { + Self { + pool_idle_timeout: Duration::from_secs(90), + pool_max_idle_per_host: 8, + tcp_keepalive: Duration::from_secs(20), + request_timeout: Duration::from_secs(30), + } + } +} + /// A "live" database. #[derive(Clone)] pub struct ReplicaContext { @@ -18,6 +46,24 @@ pub struct ReplicaContext { pub replica_id: u64, pub logger: Arc, pub subscriptions: ModuleSubscriptions, + /// Warmed HTTP/2 client for [`crate::host::instance_env::InstanceEnv::call_reducer_on_db`]. + /// + /// `reqwest::Client` is internally an `Arc`, so cloning `ReplicaContext` shares the pool. + pub call_reducer_client: reqwest::Client, +} + +impl ReplicaContext { + /// Build a warmed `reqwest::Client` from `config`. 
+ pub fn new_call_reducer_client(config: &CallReducerOnDbConfig) -> reqwest::Client { + reqwest::Client::builder() + .http2_prior_knowledge() + .tcp_keepalive(config.tcp_keepalive) + .pool_idle_timeout(config.pool_idle_timeout) + .pool_max_idle_per_host(config.pool_max_idle_per_host) + .timeout(config.request_timeout) + .build() + .expect("failed to build call_reducer_on_db HTTP/2 client") + } } impl ReplicaContext { From f718db5f8db5a67a13b3386572588869115e70e8 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Fri, 27 Mar 2026 15:52:19 -0400 Subject: [PATCH 06/38] Update payment to connect to a remote database when necessary --- modules/tpcc/src/lib.rs | 134 +------------------------- modules/tpcc/src/new_order.rs | 14 +-- modules/tpcc/src/payment.rs | 172 ++++++++++++++++++++++++++++++++++ modules/tpcc/src/remote.rs | 28 ++++++ 4 files changed, 206 insertions(+), 142 deletions(-) create mode 100644 modules/tpcc/src/payment.rs diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index b7265410dc4..b218fcd8aa6 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,5 +1,5 @@ use spacetimedb::{ - procedure, reducer, table, Identity, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, + procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, }; use std::collections::BTreeSet; @@ -12,6 +12,7 @@ macro_rules! 
ensure { } mod new_order; +mod payment; mod remote; const DISTRICTS_PER_WAREHOUSE: u8 = 10; @@ -28,21 +29,6 @@ pub enum CustomerSelector { type WarehouseId = u16; -#[derive(Clone, Debug, SpacetimeType)] -pub struct PaymentResult { - pub warehouse_name: String, - pub district_name: String, - pub customer_id: u32, - pub customer_first: String, - pub customer_middle: String, - pub customer_last: String, - pub customer_balance_cents: i64, - pub customer_credit: String, - pub customer_discount_bps: i32, - pub payment_amount_cents: i64, - pub customer_data: Option, -} - #[derive(Clone, Debug, SpacetimeType)] pub struct OrderStatusLineResult { pub item_id: u32, @@ -116,13 +102,6 @@ pub struct Warehouse { pub w_zip: String, pub w_tax_bps: i32, pub w_ytd_cents: i64, - - /// Added by us: the [`Identity`] of the remote database where this warehouse is sharded, - /// or `None` if this warehouse is sharded in the local database. - /// - /// TPC-C 1.4.7: "Attributes may be added and/or duplicated from one table to another - /// as long as these changes do not improve performance." 
- pub remote_database_home: Option, } #[table( @@ -328,16 +307,6 @@ pub struct DeliveryCompletion { pub processed_districts: u8, } -struct PaymentRequest<'a> { - w_id: u16, - d_id: u8, - c_w_id: u16, - c_d_id: u8, - customer_selector: &'a CustomerSelector, - payment_amount_cents: i64, - now: Timestamp, -} - #[reducer] pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { for row in ctx.db.delivery_job().iter() { @@ -454,33 +423,6 @@ pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<() Ok(()) } -#[procedure] -pub fn payment( - ctx: &mut ProcedureContext, - w_id: u16, - d_id: u8, - c_w_id: u16, - c_d_id: u8, - customer: CustomerSelector, - payment_amount_cents: i64, -) -> Result { - let now = ctx.timestamp; - ctx.try_with_tx(|tx| { - payment_tx( - tx, - PaymentRequest { - w_id, - d_id, - c_w_id, - c_d_id, - customer_selector: &customer, - payment_amount_cents, - now, - }, - ) - }) -} - #[procedure] pub fn order_status( ctx: &mut ProcedureContext, @@ -665,78 +607,6 @@ fn validate_stock_row(row: &Stock) -> Result<(), String> { Ok(()) } -fn payment_tx(tx: &spacetimedb::TxContext, req: PaymentRequest<'_>) -> Result { - ensure!(req.payment_amount_cents > 0, "payment amount must be positive"); - - let warehouse = find_warehouse(tx, req.w_id)?; - let district = find_district(tx, req.w_id, req.d_id)?; - let customer = resolve_customer(tx, req.c_w_id, req.c_d_id, req.customer_selector)?; - - tx.db.warehouse().w_id().update(Warehouse { - w_ytd_cents: warehouse.w_ytd_cents + req.payment_amount_cents, - ..warehouse.clone() - }); - - tx.db.district().district_key().update(District { - d_ytd_cents: district.d_ytd_cents + req.payment_amount_cents, - ..district.clone() - }); - - let mut updated_customer = Customer { - c_balance_cents: customer.c_balance_cents - req.payment_amount_cents, - c_ytd_payment_cents: customer.c_ytd_payment_cents + req.payment_amount_cents, - c_payment_cnt: customer.c_payment_cnt + 1, - ..customer.clone() - }; - - if 
updated_customer.c_credit == "BC" { - let prefix = format!( - "{} {} {} {} {} {} {}|", - updated_customer.c_id, - updated_customer.c_d_id, - updated_customer.c_w_id, - req.d_id, - req.w_id, - req.payment_amount_cents, - req.now.to_micros_since_unix_epoch() - ); - updated_customer.c_data = format!("{prefix}{}", updated_customer.c_data); - updated_customer.c_data.truncate(MAX_C_DATA_LEN); - } - - tx.db.customer().customer_key().update(updated_customer.clone()); - - tx.db.history().insert(History { - history_id: 0, - h_c_id: updated_customer.c_id, - h_c_d_id: updated_customer.c_d_id, - h_c_w_id: updated_customer.c_w_id, - h_d_id: req.d_id, - h_w_id: req.w_id, - h_date: req.now, - h_amount_cents: req.payment_amount_cents, - h_data: format!("{} {}", warehouse.w_name, district.d_name), - }); - - Ok(PaymentResult { - warehouse_name: warehouse.w_name, - district_name: district.d_name, - customer_id: updated_customer.c_id, - customer_first: updated_customer.c_first, - customer_middle: updated_customer.c_middle, - customer_last: updated_customer.c_last, - customer_balance_cents: updated_customer.c_balance_cents, - customer_credit: updated_customer.c_credit.clone(), - customer_discount_bps: updated_customer.c_discount_bps, - payment_amount_cents: req.payment_amount_cents, - customer_data: if updated_customer.c_credit == "BC" { - Some(updated_customer.c_data) - } else { - None - }, - }) -} - fn order_status_tx( tx: &spacetimedb::TxContext, w_id: u16, diff --git a/modules/tpcc/src/new_order.rs b/modules/tpcc/src/new_order.rs index db4b7c39b5c..1c441bf8c4c 100644 --- a/modules/tpcc/src/new_order.rs +++ b/modules/tpcc/src/new_order.rs @@ -5,8 +5,8 @@ use spacetimedb_sats::serde::SerdeWrapper; use crate::{ district, find_customer_by_id, find_district, find_stock, find_warehouse, item, order_line, pack_order_key, - remote::{call_remote_function, get_spacetimedb_uri}, - stock, warehouse, District, Item, OrderLine, Stock, WarehouseId, DISTRICTS_PER_WAREHOUSE, TAX_SCALE, + 
remote::{call_remote_function, get_spacetimedb_uri, remote_warehouse_home}, + stock, District, Item, OrderLine, Stock, WarehouseId, DISTRICTS_PER_WAREHOUSE, TAX_SCALE, }; #[derive(Clone, Debug, SpacetimeType)] @@ -205,13 +205,6 @@ fn partition_local_from_remote_database_items( let is_remote_warehouse = line.supply_w_id == local_warehouse_id; all_local_warehouse &= !is_remote_warehouse; - let warehouse = tx - .db - .warehouse() - .w_id() - .find(line.supply_w_id) - .ok_or_else(|| format!("No such warehouse: {}", line.supply_w_id))?; - // TECHNICALLY NON-CONFORMANT: If we encounter a non-existent item in the order, // we'll short-circuit and exit here. // TPC-C technically requires, in 2.4.2.3, that we still retrieve and process all the valid item numbers. @@ -221,7 +214,8 @@ fn partition_local_from_remote_database_items( // - using a different type of transaction // But we do skip inspecting some number of valid items and stocks. let item = find_item(tx, line.item_id)?; - match warehouse.remote_database_home { + + match remote_warehouse_home(tx, line.supply_w_id) { None => { // Warehouse is local to this database. // We'll actually "process" the items, i.e. 
decrement the stock and sum the order price, diff --git a/modules/tpcc/src/payment.rs b/modules/tpcc/src/payment.rs new file mode 100644 index 00000000000..ac67d36dd1c --- /dev/null +++ b/modules/tpcc/src/payment.rs @@ -0,0 +1,172 @@ +use spacetimedb::{procedure, ProcedureContext, SpacetimeType, Table, Timestamp, TxContext}; +use spacetimedb_sats::serde::SerdeWrapper; + +use crate::{ + customer, district, find_district, find_warehouse, history, + remote::{call_remote_function, get_spacetimedb_uri, remote_warehouse_home}, + resolve_customer, warehouse, Customer, CustomerSelector, District, History, Warehouse, WarehouseId, MAX_C_DATA_LEN, +}; + +#[derive(Clone, Debug, SpacetimeType)] +pub struct PaymentResult { + pub warehouse_name: String, + pub district_name: String, + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub customer_credit: String, + pub customer_discount_bps: i32, + pub payment_amount_cents: i64, + pub customer_data: Option, +} + +#[procedure] +pub fn payment( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, +) -> Result { + let now = ctx.timestamp; + + let (warehouse_home, spacetimedb_uri) = + ctx.with_tx(|tx| (remote_warehouse_home(tx, c_w_id), get_spacetimedb_uri(tx))); + let payment_request = PaymentRequest { + terminal_warehouse_id: w_id, + terminal_district_id: d_id, + customer_warehouse_id: c_w_id, + customer_district_id: c_d_id, + customer_selector: customer, + payment_amount_cents, + now, + }; + let customer = match warehouse_home { + None => { + // Customer warehouse is managed by this database. + ctx.try_with_tx(|tx| { + let customer = resolve_customer(tx, c_w_id, c_d_id, &payment_request.customer_selector)?; + Ok::<_, String>(update_customer(tx, &payment_request, customer)) + })? 
+ } + Some(remote_database) => { + // Customer warehouse is managed by a remote database. + // Contact them to update the customer's balance and retrieve their info. + let body = call_remote_function( + ctx, + &spacetimedb_uri, + remote_database, + "process_remote_payment", + vec![serde_json::json!(SerdeWrapper(payment_request.clone()))], + )? + .into_string() + .expect("Body should be valid UTF-8"); + let res: SerdeWrapper> = + serde_json::from_str(&body).expect("Response does not conform to expected schema"); + res.0? + } + }; + + ctx.try_with_tx(|tx| { + let warehouse = find_warehouse(tx, payment_request.terminal_warehouse_id)?; + let district = find_district( + tx, + payment_request.terminal_warehouse_id, + payment_request.terminal_district_id, + )?; + + tx.db.warehouse().w_id().update(Warehouse { + w_ytd_cents: warehouse.w_ytd_cents + payment_request.payment_amount_cents, + ..warehouse.clone() + }); + + tx.db.district().district_key().update(District { + d_ytd_cents: district.d_ytd_cents + payment_request.payment_amount_cents, + ..district.clone() + }); + + tx.db.history().insert(History { + history_id: 0, + h_c_id: customer.c_id, + h_c_d_id: customer.c_d_id, + h_c_w_id: customer.c_w_id, + h_d_id: payment_request.terminal_district_id, + h_w_id: payment_request.terminal_warehouse_id, + h_date: payment_request.now, + h_amount_cents: payment_request.payment_amount_cents, + h_data: format!("{} {}", warehouse.w_name, district.d_name), + }); + + Ok(PaymentResult { + warehouse_name: warehouse.w_name, + district_name: district.d_name, + customer_id: customer.c_id, + customer_first: customer.c_first.clone(), + customer_middle: customer.c_middle.clone(), + customer_last: customer.c_last.clone(), + customer_balance_cents: customer.c_balance_cents, + customer_credit: customer.c_credit.clone(), + customer_discount_bps: customer.c_discount_bps, + payment_amount_cents: payment_request.payment_amount_cents, + customer_data: if customer.c_credit == "BC" { + 
Some(customer.c_data.clone()) + } else { + None + }, + }) + }) +} + +#[derive(SpacetimeType, Clone)] +struct PaymentRequest { + terminal_warehouse_id: WarehouseId, + terminal_district_id: u8, + customer_warehouse_id: WarehouseId, + customer_district_id: u8, + customer_selector: CustomerSelector, + payment_amount_cents: i64, + now: Timestamp, +} + +#[procedure] +fn process_remote_payment(ctx: &mut ProcedureContext, request: PaymentRequest) -> Result { + ctx.try_with_tx(|tx| { + let customer = resolve_customer( + tx, + request.customer_warehouse_id, + request.customer_district_id, + &request.customer_selector, + )?; + Ok(update_customer(tx, &request, customer)) + }) +} + +fn update_customer(tx: &TxContext, request: &PaymentRequest, customer: Customer) -> Customer { + let mut updated_customer = Customer { + c_balance_cents: customer.c_balance_cents - request.payment_amount_cents, + c_ytd_payment_cents: customer.c_ytd_payment_cents + request.payment_amount_cents, + c_payment_cnt: customer.c_payment_cnt + 1, + ..customer + }; + + if updated_customer.c_credit == "BC" { + let prefix = format!( + "{} {} {} {} {} {} {}|", + updated_customer.c_id, + updated_customer.c_d_id, + updated_customer.c_w_id, + request.terminal_district_id, + request.terminal_warehouse_id, + request.payment_amount_cents, + request.now.to_micros_since_unix_epoch() + ); + updated_customer.c_data = format!("{prefix}{}", updated_customer.c_data); + updated_customer.c_data.truncate(MAX_C_DATA_LEN); + } + tx.db.customer().customer_key().update(updated_customer.clone()); + updated_customer +} diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index 372005e8fb1..b2210551c76 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -1,6 +1,8 @@ use http::Request; use spacetimedb::{reducer, table, Identity, ProcedureContext, ReducerContext, Table, TxContext}; +use crate::WarehouseId; + #[table(accessor = spacetimedb_uri)] struct SpacetimeDbUri { uri: String, @@ -18,6 +20,32 
@@ pub fn get_spacetimedb_uri(tx: &TxContext) -> String { tx.db.spacetimedb_uri().iter().next().unwrap().uri } +/// For warehouses not managed by this database, stores the [`Identity`] of the remote database which manages that warehouse. +/// +/// Will not have a row present for a warehouse managed by the local database. +#[table(accessor = remote_warehouse)] +pub struct RemoteWarehouse { + #[primary_key] + pub w_id: WarehouseId, + pub remote_database_home: Identity, +} + +#[reducer] +fn load_remote_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + ctx.db.remote_warehouse().try_insert(row)?; + } + Ok(()) +} + +pub fn remote_warehouse_home(ctx: &ReducerContext, warehouse_id: WarehouseId) -> Option { + ctx.db + .remote_warehouse() + .w_id() + .find(warehouse_id) + .map(|row| row.remote_database_home) +} + pub fn call_remote_function( ctx: &mut ProcedureContext, spacetimedb_uri: &str, From d7776c70fcf4a502e2e99ade5da54972c9b4c255 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Fri, 27 Mar 2026 16:41:35 -0400 Subject: [PATCH 07/38] Include content-type header --- modules/tpcc/src/remote.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index b2210551c76..49d3276a719 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -58,6 +58,7 @@ pub fn call_remote_function( "{spacetimedb_uri}/v1/database/{database_ident}/call/{function_name}" )) .method("POST") + .header("Content-Type", "application/json") // TODO(auth): include a token. 
.body(serde_json::json!(arguments).to_string()) .map_err(|e| format!("Error constructing `Request`: {e}"))?; From 10f21f23cfebbeb2c89c84cff25d6d00b36cc776 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Fri, 27 Mar 2026 17:29:57 -0400 Subject: [PATCH 08/38] Use BSATN for remote calls instead of JSON JSON was getting wacky case-conversion errors I didn't feel like fixing, so I just modified the `database/call` route to accept `application/octet-stream` with BSATN. --- crates/client-api/src/routes/database.rs | 27 +++++++++++++----------- modules/tpcc/src/new_order.rs | 14 ++++++------ modules/tpcc/src/payment.rs | 2 +- modules/tpcc/src/remote.rs | 9 ++++---- 4 files changed, 27 insertions(+), 25 deletions(-) diff --git a/crates/client-api/src/routes/database.rs b/crates/client-api/src/routes/database.rs index 6b753a9c8fd..5b1397e475c 100644 --- a/crates/client-api/src/routes/database.rs +++ b/crates/client-api/src/routes/database.rs @@ -136,13 +136,24 @@ pub async fn call( reducer, }): Path, TypedHeader(content_type): TypedHeader, - ByteStringBody(body): ByteStringBody, + body: Bytes, ) -> axum::response::Result { - assert_content_type_json(content_type)?; - let caller_identity = auth.claims.identity; - let args = FunctionArgs::Json(body); + let args = if content_type == headers::ContentType::json() { + FunctionArgs::Json( + body.try_into() + .map_err(|e| (StatusCode::BAD_REQUEST, format!("JSON body was not valid UTF-8: {e}")))?, + ) + } else if content_type == headers::ContentType::octet_stream() { + FunctionArgs::Bsatn(body) + } else { + return Err(( + StatusCode::BAD_REQUEST, + "Expected a `Content-Type` of either `application/json` or `application/octet-stream`", + ) + .into()); + }; // HTTP callers always need a connection ID to provide to connect/disconnect, // so generate one. 
@@ -216,14 +227,6 @@ pub async fn call( } } -fn assert_content_type_json(content_type: headers::ContentType) -> axum::response::Result<()> { - if content_type != headers::ContentType::json() { - Err(axum::extract::rejection::MissingJsonContentType::default().into()) - } else { - Ok(()) - } -} - fn reducer_outcome_response( owner_identity: &Identity, reducer: &str, diff --git a/modules/tpcc/src/new_order.rs b/modules/tpcc/src/new_order.rs index 1c441bf8c4c..ad636759fc7 100644 --- a/modules/tpcc/src/new_order.rs +++ b/modules/tpcc/src/new_order.rs @@ -268,12 +268,10 @@ fn reserve_remote_items( &spacetimedb_uri, *remote_database_identity, "reserve_item_for_remote_order", - vec![serde_json::json!(spacetimedb_sats::serde::SerdeWrapper( - ReserveItemInput { - line: NewOrderLineInput::clone(line), - district: district_id, - } - ))], + ReserveItemInput { + line: NewOrderLineInput::clone(line), + district: district_id, + }, ) { Err(e) => { rollback_all_remote_item_reservations( @@ -319,7 +317,7 @@ fn rollback_all_remote_item_reservations( spacetimedb_uri, remote_item.remote_database_identity, "rollback_item_reservation", - vec![serde_json::json!(reservation.rollback_token)], + reservation.rollback_token, ) { log::error!("Error rollinb back item reservation: {e}"); } @@ -338,7 +336,7 @@ fn confirm_all_remote_item_reservations( spacetimedb_uri, remote_item.remote_database_identity, "confirm_item_reservation", - vec![serde_json::json!(reservation.rollback_token)], + reservation.rollback_token, ) { log::error!("Error confirming item reservation: {e}"); } diff --git a/modules/tpcc/src/payment.rs b/modules/tpcc/src/payment.rs index ac67d36dd1c..938e00748ad 100644 --- a/modules/tpcc/src/payment.rs +++ b/modules/tpcc/src/payment.rs @@ -61,7 +61,7 @@ pub fn payment( &spacetimedb_uri, remote_database, "process_remote_payment", - vec![serde_json::json!(SerdeWrapper(payment_request.clone()))], + payment_request.clone(), )? 
.into_string() .expect("Body should be valid UTF-8"); diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index 49d3276a719..7bb12c82c39 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -1,5 +1,6 @@ use http::Request; -use spacetimedb::{reducer, table, Identity, ProcedureContext, ReducerContext, Table, TxContext}; +use spacetimedb::{reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, Table, TxContext}; +use spacetimedb_sats::bsatn; use crate::WarehouseId; @@ -51,16 +52,16 @@ pub fn call_remote_function( spacetimedb_uri: &str, database_ident: Identity, function_name: &str, - arguments: Vec, + arguments: impl Serialize, ) -> Result { let request = Request::builder() .uri(format!( "{spacetimedb_uri}/v1/database/{database_ident}/call/{function_name}" )) .method("POST") - .header("Content-Type", "application/json") + .header("Content-Type", "application/octet-stream") // TODO(auth): include a token. - .body(serde_json::json!(arguments).to_string()) + .body(bsatn::to_vec(&arguments).map_err(|e| format!("Failed to BSATN-serialize arguments: {e}"))?) 
.map_err(|e| format!("Error constructing `Request`: {e}"))?; match ctx.http.send(request) { Err(e) => Err(format!("Error sending request to remote database {database_ident} at URI {spacetimedb_uri} to call {function_name}: {e}")), From dd7679516829f3b6eb7fb276284c63091223e852 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Fri, 27 Mar 2026 17:32:34 -0400 Subject: [PATCH 09/38] Break the driver by making the runner support multiple DBs --- tools/tpcc-runner/Cargo.toml | 2 +- tools/tpcc-runner/src/client.rs | 19 ++- tools/tpcc-runner/src/config.rs | 28 ++-- tools/tpcc-runner/src/driver.rs | 6 +- tools/tpcc-runner/src/loader.rs | 150 ++++++++++++++++-- tools/tpcc-runner/src/module_bindings/mod.rs | 17 +- .../src/module_bindings/warehouse_type.rs | 3 - 7 files changed, 189 insertions(+), 36 deletions(-) diff --git a/tools/tpcc-runner/Cargo.toml b/tools/tpcc-runner/Cargo.toml index fdad0563564..9d87faaf0e2 100644 --- a/tools/tpcc-runner/Cargo.toml +++ b/tools/tpcc-runner/Cargo.toml @@ -15,7 +15,7 @@ env_logger.workspace = true log.workspace = true parking_lot.workspace = true rand.workspace = true -reqwest.workspace = true +reqwest = { workspace = true, features = ["blocking", "json"] } serde.workspace = true serde_json.workspace = true tokio.workspace = true diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs index d685f7ee2a7..1055d377e5c 100644 --- a/tools/tpcc-runner/src/client.rs +++ b/tools/tpcc-runner/src/client.rs @@ -5,7 +5,7 @@ use std::time::Duration; use crate::config::ConnectionConfig; use crate::module_bindings::*; -use spacetimedb_sdk::DbContext; +use spacetimedb_sdk::{DbContext, Identity}; pub struct ModuleClient { conn: DbConnection, @@ -14,13 +14,13 @@ pub struct ModuleClient { } impl ModuleClient { - pub fn connect(config: &ConnectionConfig) -> Result { + pub fn connect(config: &ConnectionConfig, database_identity: Identity) -> Result { let (ready_tx, ready_rx) = sync_channel(1); let success_tx = ready_tx.clone(); let 
error_tx = ready_tx; let mut builder = DbConnection::builder() .with_uri(config.uri.clone()) - .with_database_name(config.database.clone()) + .with_database_name(database_identity.to_string()) .with_confirmed_reads(config.confirmed_reads) .on_connect(move |_, _, _| { let _ = success_tx.send(Ok::<(), anyhow::Error>(())); @@ -74,6 +74,19 @@ impl ModuleClient { } } + pub fn load_remote_warehouses(&self, rows: Vec) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.load_remote_warehouses_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("load_remote_warehouses failed: {}", message), + Ok(Err(err)) => Err(anyhow!("load_remote_warehouses internal error: {}", err)), + Err(_) => bail!("timed out waiting for load_remote_warehouses"), + } + } + pub fn load_warehouses(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_warehouses_then(rows, move |_, res| { diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs index 4042cb85f59..c20e715a69b 100644 --- a/tools/tpcc-runner/src/config.rs +++ b/tools/tpcc-runner/src/config.rs @@ -24,7 +24,7 @@ pub enum Command { #[derive(Debug, Clone)] pub struct ConnectionConfig { pub uri: String, - pub database: String, + pub database_prefix: String, pub token: Option, pub confirmed_reads: bool, pub timeout_secs: u64, @@ -33,7 +33,8 @@ pub struct ConnectionConfig { #[derive(Debug, Clone)] pub struct LoadConfig { pub connection: ConnectionConfig, - pub warehouses: u16, + pub warehouses_per_database: u16, + pub num_databases: u16, pub batch_size: usize, pub reset: bool, } @@ -70,7 +71,9 @@ pub struct LoadArgs { #[command(flatten)] pub connection: ConnectionArgs, #[arg(long)] - pub warehouses: Option, + pub num_databases: Option, + #[arg(long)] + pub warehouses_per_database: Option, #[arg(long)] pub batch_size: Option, #[arg(long)] @@ -128,7 +131,7 @@ pub 
struct ConnectionArgs { #[arg(long)] pub uri: Option, #[arg(long)] - pub database: Option, + pub database_prefix: Option, #[arg(long)] pub token: Option, #[arg(long)] @@ -152,7 +155,7 @@ pub struct FileConfig { #[derive(Debug, Clone, Default, Deserialize)] struct FileConnectionConfig { uri: Option, - database: Option, + database_prefix: Option, token: Option, confirmed_reads: Option, timeout_secs: Option, @@ -160,7 +163,8 @@ struct FileConnectionConfig { #[derive(Debug, Clone, Default, Deserialize)] struct FileLoadConfig { - warehouses: Option, + num_databases: Option, + warehouses_per_database: Option, batch_size: Option, reset: Option, } @@ -209,10 +213,10 @@ impl ConnectionArgs { .clone() .or_else(|| file.uri.clone()) .unwrap_or_else(|| "http://127.0.0.1:3000".to_string()), - database: self - .database + database_prefix: self + .database_prefix .clone() - .or_else(|| file.database.clone()) + .or_else(|| file.database_prefix.clone()) .unwrap_or_else(|| "tpcc".to_string()), token: self.token.clone().or_else(|| file.token.clone()), confirmed_reads: self.confirmed_reads.or(file.confirmed_reads).unwrap_or(true), @@ -225,7 +229,11 @@ impl LoadArgs { pub fn resolve(&self, file: &FileConfig) -> LoadConfig { LoadConfig { connection: self.connection.resolve(&file.connection), - warehouses: self.warehouses.or(file.load.warehouses).unwrap_or(1), + num_databases: self.num_databases.or(file.load.num_databases).unwrap_or(1), + warehouses_per_database: self + .warehouses_per_database + .or(file.load.warehouses_per_database) + .unwrap_or(1), batch_size: self.batch_size.or(file.load.batch_size).unwrap_or(500), reset: self.reset.or(file.load.reset).unwrap_or(true), } diff --git a/tools/tpcc-runner/src/driver.rs b/tools/tpcc-runner/src/driver.rs index dd7086db45f..0644d517f82 100644 --- a/tools/tpcc-runner/src/driver.rs +++ b/tools/tpcc-runner/src/driver.rs @@ -113,7 +113,7 @@ pub async fn run(config: DriverConfig) -> Result<()> { run_id: run_id.clone(), driver_id: 
config.driver_id.clone(), uri: config.connection.uri.clone(), - database: config.connection.database.clone(), + database: todo!("config.connection.database.clone()"), terminal_start: config.terminal_start, terminals: config.terminals, warehouse_count: config.warehouse_count, @@ -143,7 +143,7 @@ fn run_terminal(runtime: TerminalRuntime) -> Result<()> { assignment, seed, } = runtime; - let client = ModuleClient::connect(&config.connection)?; + let client = ModuleClient::connect(&config.connection, todo!())?; sleep_until_ms(schedule.warmup_start_ms); let mut rng = StdRng::seed_from_u64(seed); @@ -524,7 +524,7 @@ async fn harvest_delivery_completions( if expected == 0 { return Ok(()); } - let client = ModuleClient::connect(&config.connection)?; + let client = ModuleClient::connect(&config.connection, todo!())?; let progress = expect_ok("delivery_progress", client.delivery_progress(schedule.run_id.clone()))?; log::info!( "delivery progress before harvest: pending_jobs={} completed_jobs={}", diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index f181d51cb65..d8c9ecd27b8 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -1,5 +1,7 @@ use anyhow::{Context, Result}; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use spacetimedb_sdk::Identity; +use std::ops::Range; use std::time::SystemTime; use crate::client::ModuleClient; @@ -16,13 +18,62 @@ const CUSTOMER_INITIAL_YTD_PAYMENT_CENTS: i64 = 1_000; const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; pub fn run(config: LoadConfig) -> Result<()> { + log::info!( + "Loading tpcc dataset into {} databases, all running on {}", + config.num_databases, + config.connection.uri + ); + + let database_identities = lookup_database_identities(&config)?; + + for database_number in 0..config.num_databases { + configure_one_database(&config, database_number, &database_identities)?; + } + + log::info!("tpcc load finished"); + + Ok(()) +} + +fn 
lookup_database_identities(config: &LoadConfig) -> Result> { + (0..config.num_databases) + .map(|database_number| { + let body = reqwest::blocking::get(format!( + "{}/v1/database/{}-{}", + config.connection.uri, config.connection.database_prefix, database_number + ))?; + let obj = match body.json::()? { + serde_json::Value::Object(obj) => obj, + els => anyhow::bail!("Expected an object but got {els:?}"), + }; + let Some(db_ident) = obj.get("database_identity") else { + anyhow::bail!("Expected a `database_identity` property but saw none in {obj:?}") + }; + let serde_json::Value::Object(ident_obj) = db_ident else { + anyhow::bail!("Expected an object but got {db_ident:?}") + }; + let Some(ident_str) = ident_obj.get("__identity__") else { + anyhow::bail!("Expected a `__identity__` property but saw none in {ident_obj:?}") + }; + let serde_json::Value::String(ident_str) = ident_str else { + anyhow::bail!("Expected a string but got {ident_str:?}") + }; + let ident = Identity::from_hex(ident_str)?; + Ok(ident) + }) + .collect() +} + +fn configure_one_database(config: &LoadConfig, database_number: u16, database_identities: &[Identity]) -> Result<()> { + let database = database_identities[database_number as usize]; log::info!( "loading tpcc dataset into {} / {} with {} warehouse(s)", config.connection.uri, - config.connection.database, - config.warehouses + database, + config.warehouses_per_database ); - let client = ModuleClient::connect(&config.connection)?; + + let client = ModuleClient::connect(&config.connection, database)?; if config.reset { client.reset_tpcc().context("failed to reset tpcc data")?; } @@ -33,12 +84,34 @@ pub fn run(config: LoadConfig) -> Result<()> { let load_c_last = rng.random_range(0..=255); let base_ts = Timestamp::from(SystemTime::now()); + load_remote_warehouses( + &client, + database_number, + config.num_databases, + config.warehouses_per_database, + config.batch_size, + database_identities, + )?; load_items(&client, config.batch_size, &mut 
rng)?; - load_warehouses_and_districts(&client, config.warehouses, config.batch_size, base_ts, &mut rng)?; - load_stock(&client, config.warehouses, config.batch_size, &mut rng)?; + load_warehouses_and_districts( + &client, + database_number, + config.warehouses_per_database, + config.batch_size, + base_ts, + &mut rng, + )?; + load_stock( + &client, + database_number, + config.warehouses_per_database, + config.batch_size, + &mut rng, + )?; load_customers_history_orders( &client, - config.warehouses, + database_number, + config.warehouses_per_database, config.batch_size, base_ts, load_c_last, @@ -46,7 +119,8 @@ pub fn run(config: LoadConfig) -> Result<()> { )?; client.shutdown(); - log::info!("tpcc load finished"); + log::info!("tpcc load for database {database} finished"); + Ok(()) } @@ -70,9 +144,50 @@ fn load_items(client: &ModuleClient, batch_size: usize, rng: &mut StdRng) -> Res Ok(()) } +fn warehouses_range(database_number: u16, warehouses_per_database: u16) -> Range { + let start_warehouse_number = database_number * warehouses_per_database + 1; + let end_warehouse_number = start_warehouse_number + warehouses_per_database; + start_warehouse_number..end_warehouse_number +} + +fn load_remote_warehouses( + client: &ModuleClient, + database_number: u16, + num_databases: u16, + warehouses_per_database: u16, + batch_size: usize, + database_identities: &[Identity], +) -> Result<()> { + let mut warehouse_batch = Vec::with_capacity(batch_size); + + for other_database_number in 0..num_databases { + if other_database_number == database_number { + continue; + } + let other_database_ident = database_identities[other_database_number as usize]; + + for w_id in warehouses_range(other_database_number, warehouses_per_database) { + warehouse_batch.push(RemoteWarehouse { + w_id, + remote_database_home: other_database_ident, + }); + } + } + + while !warehouse_batch.is_empty() { + let split_at = warehouse_batch.len().min(batch_size); + let remainder = 
warehouse_batch.split_off(split_at); + let rows = std::mem::replace(&mut warehouse_batch, remainder); + client.load_remote_warehouses(rows)?; + } + + Ok(()) +} + fn load_warehouses_and_districts( client: &ModuleClient, - warehouses: u16, + database_number: u16, + warehouses_per_database: u16, batch_size: usize, timestamp: Timestamp, rng: &mut StdRng, @@ -80,7 +195,7 @@ fn load_warehouses_and_districts( let mut warehouse_batch = Vec::with_capacity(batch_size); let mut district_batch = Vec::with_capacity(batch_size); - for w_id in 1..=warehouses { + for w_id in warehouses_range(database_number, warehouses_per_database) { warehouse_batch.push(Warehouse { w_id, w_name: alpha_string(rng, 6, 10), @@ -91,8 +206,6 @@ fn load_warehouses_and_districts( w_zip: zip_code(rng), w_tax_bps: rng.random_range(0..=2_000), w_ytd_cents: WAREHOUSE_YTD_CENTS, - - remote_database_home: None, }); for d_id in 1..=DISTRICTS_PER_WAREHOUSE { @@ -129,9 +242,15 @@ fn load_warehouses_and_districts( Ok(()) } -fn load_stock(client: &ModuleClient, warehouses: u16, batch_size: usize, rng: &mut StdRng) -> Result<()> { +fn load_stock( + client: &ModuleClient, + database_number: u16, + warehouses_per_database: u16, + batch_size: usize, + rng: &mut StdRng, +) -> Result<()> { let mut batch = Vec::with_capacity(batch_size); - for w_id in 1..=warehouses { + for w_id in warehouses_range(database_number, warehouses_per_database) { for item_id in 1..=ITEMS { batch.push(Stock { stock_key: pack_stock_key(w_id, item_id), @@ -166,7 +285,8 @@ fn load_stock(client: &ModuleClient, warehouses: u16, batch_size: usize, rng: &m fn load_customers_history_orders( client: &ModuleClient, - warehouses: u16, + database_number: u16, + warehouses_per_database: u16, batch_size: usize, timestamp: Timestamp, load_c_last: u32, @@ -178,7 +298,7 @@ fn load_customers_history_orders( let mut new_order_batch = Vec::with_capacity(batch_size); let mut order_line_batch = Vec::with_capacity(batch_size); - for w_id in 1..=warehouses { + for 
w_id in warehouses_range(database_number, warehouses_per_database) { for d_id in 1..=DISTRICTS_PER_WAREHOUSE { let mut permutation: Vec = (1..=CUSTOMERS_PER_DISTRICT).collect(); permutation.shuffle(rng); diff --git a/tools/tpcc-runner/src/module_bindings/mod.rs b/tools/tpcc-runner/src/module_bindings/mod.rs index 85aee5e3f02..74ce5271415 100644 --- a/tools/tpcc-runner/src/module_bindings/mod.rs +++ b/tools/tpcc-runner/src/module_bindings/mod.rs @@ -1,7 +1,7 @@ // THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE // WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. -// This was generated using spacetimedb cli version 2.1.0 (commit d84b7a7f9ee56d9bd5674a2c4583f20873f0c695). +// This was generated using spacetimedb cli version 2.1.0 (commit 1a74fe9ea4120e1cbc7004d5cb42a443034e238f-dirty). #![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; @@ -26,6 +26,7 @@ pub mod load_items_reducer; pub mod load_new_orders_reducer; pub mod load_order_lines_reducer; pub mod load_orders_reducer; +pub mod load_remote_warehouses_reducer; pub mod load_stocks_reducer; pub mod load_warehouses_reducer; pub mod new_order_line_input_type; @@ -39,8 +40,11 @@ pub mod order_status_line_result_type; pub mod order_status_procedure; pub mod order_status_result_type; pub mod payment_procedure; +pub mod payment_request_type; pub mod payment_result_type; +pub mod process_remote_payment_procedure; pub mod queue_delivery_procedure; +pub mod remote_warehouse_type; pub mod reserve_item_for_remote_order_procedure; pub mod reserve_item_input_type; pub mod reserve_item_output_type; @@ -52,6 +56,7 @@ pub mod spacetime_db_uri_type; pub mod stock_level_procedure; pub mod stock_level_result_type; pub mod stock_type; +pub mod test_procedure; pub mod warehouse_type; pub use confirm_item_reservation_reducer::confirm_item_reservation; @@ -74,6 +79,7 @@ pub use load_items_reducer::load_items; pub use 
load_new_orders_reducer::load_new_orders; pub use load_order_lines_reducer::load_order_lines; pub use load_orders_reducer::load_orders; +pub use load_remote_warehouses_reducer::load_remote_warehouses; pub use load_stocks_reducer::load_stocks; pub use load_warehouses_reducer::load_warehouses; pub use new_order_line_input_type::NewOrderLineInput; @@ -87,8 +93,11 @@ pub use order_status_line_result_type::OrderStatusLineResult; pub use order_status_procedure::order_status; pub use order_status_result_type::OrderStatusResult; pub use payment_procedure::payment; +pub use payment_request_type::PaymentRequest; pub use payment_result_type::PaymentResult; +pub use process_remote_payment_procedure::process_remote_payment; pub use queue_delivery_procedure::queue_delivery; +pub use remote_warehouse_type::RemoteWarehouse; pub use reserve_item_for_remote_order_procedure::reserve_item_for_remote_order; pub use reserve_item_input_type::ReserveItemInput; pub use reserve_item_output_type::ReserveItemOutput; @@ -100,6 +109,7 @@ pub use spacetime_db_uri_type::SpacetimeDbUri; pub use stock_level_procedure::stock_level; pub use stock_level_result_type::StockLevelResult; pub use stock_type::Stock; +pub use test_procedure::test; pub use warehouse_type::Warehouse; #[derive(Clone, PartialEq, Debug)] @@ -118,6 +128,7 @@ pub enum Reducer { LoadNewOrders { rows: Vec }, LoadOrderLines { rows: Vec }, LoadOrders { rows: Vec }, + LoadRemoteWarehouses { rows: Vec }, LoadStocks { rows: Vec }, LoadWarehouses { rows: Vec }, ResetTpcc, @@ -140,6 +151,7 @@ impl __sdk::Reducer for Reducer { Reducer::LoadNewOrders { .. } => "load_new_orders", Reducer::LoadOrderLines { .. } => "load_order_lines", Reducer::LoadOrders { .. } => "load_orders", + Reducer::LoadRemoteWarehouses { .. } => "load_remote_warehouses", Reducer::LoadStocks { .. } => "load_stocks", Reducer::LoadWarehouses { .. 
} => "load_warehouses", Reducer::ResetTpcc => "reset_tpcc", @@ -177,6 +189,9 @@ impl __sdk::Reducer for Reducer { Reducer::LoadOrders { rows } => { __sats::bsatn::to_vec(&load_orders_reducer::LoadOrdersArgs { rows: rows.clone() }) } + Reducer::LoadRemoteWarehouses { rows } => { + __sats::bsatn::to_vec(&load_remote_warehouses_reducer::LoadRemoteWarehousesArgs { rows: rows.clone() }) + } Reducer::LoadStocks { rows } => { __sats::bsatn::to_vec(&load_stocks_reducer::LoadStocksArgs { rows: rows.clone() }) } diff --git a/tools/tpcc-runner/src/module_bindings/warehouse_type.rs b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs index ed553b73aad..66f02971a4f 100644 --- a/tools/tpcc-runner/src/module_bindings/warehouse_type.rs +++ b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs @@ -16,7 +16,6 @@ pub struct Warehouse { pub w_zip: String, pub w_tax_bps: i32, pub w_ytd_cents: i64, - pub remote_database_home: Option<__sdk::Identity>, } impl __sdk::InModule for Warehouse { @@ -36,7 +35,6 @@ pub struct WarehouseCols { pub w_zip: __sdk::__query_builder::Col, pub w_tax_bps: __sdk::__query_builder::Col, pub w_ytd_cents: __sdk::__query_builder::Col, - pub remote_database_home: __sdk::__query_builder::Col>, } impl __sdk::__query_builder::HasCols for Warehouse { @@ -52,7 +50,6 @@ impl __sdk::__query_builder::HasCols for Warehouse { w_zip: __sdk::__query_builder::Col::new(table_name, "w_zip"), w_tax_bps: __sdk::__query_builder::Col::new(table_name, "w_tax_bps"), w_ytd_cents: __sdk::__query_builder::Col::new(table_name, "w_ytd_cents"), - remote_database_home: __sdk::__query_builder::Col::new(table_name, "remote_database_home"), } } } From 1836f951787664f5b2bf9b40b617e6b765af93fc Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Fri, 27 Mar 2026 18:35:16 -0700 Subject: [PATCH 10/38] update tpcc driver for multiple databases --- .../src/host/wasmtime/wasm_instance_env.rs | 6 +- modules/tpcc/src/new_order.rs | 10 +- tools/tpcc-runner/README.md | 161 +++++++++++++++-- 
tools/tpcc-runner/src/config.rs | 82 +++++++-- tools/tpcc-runner/src/driver.rs | 163 ++++++++++++------ tools/tpcc-runner/src/lib.rs | 27 +++ tools/tpcc-runner/src/loader.rs | 47 +---- tools/tpcc-runner/src/main.rs | 25 +-- .../load_remote_warehouses_reducer.rs | 68 ++++++++ tools/tpcc-runner/src/module_bindings/mod.rs | 2 +- .../module_bindings/payment_request_type.rs | 23 +++ .../process_remote_payment_procedure.rs | 54 ++++++ .../module_bindings/remote_warehouse_type.rs | 52 ++++++ .../src/module_bindings/test_procedure.rs | 44 +++++ tools/tpcc-runner/src/topology.rs | 105 +++++++++++ tools/tpcc-runner/src/tpcc.rs | 16 +- 16 files changed, 719 insertions(+), 166 deletions(-) create mode 100644 tools/tpcc-runner/src/lib.rs create mode 100644 tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/payment_request_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/process_remote_payment_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/remote_warehouse_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/test_procedure.rs create mode 100644 tools/tpcc-runner/src/topology.rs diff --git a/crates/core/src/host/wasmtime/wasm_instance_env.rs b/crates/core/src/host/wasmtime/wasm_instance_env.rs index 74a57b35e92..fe0ec0a1e9a 100644 --- a/crates/core/src/host/wasmtime/wasm_instance_env.rs +++ b/crates/core/src/host/wasmtime/wasm_instance_env.rs @@ -1925,9 +1925,9 @@ impl WasmInstanceEnv { Ok(0u32) } Err(NodesError::HttpError(err)) => { - let result = bsatn::to_vec(&err).with_context(|| { - format!("Failed to BSATN serialize `spacetimedb_lib::http::Error` object {err:#?}") - })?; + let message = err.to_string(); + let result = bsatn::to_vec(&message) + .with_context(|| format!("Failed to BSATN serialize HTTP error message {message:?}"))?; let bytes_source = WasmInstanceEnv::create_bytes_source(env, result.into())?; bytes_source.0.write_to(mem, out)?; 
Ok(errno::HTTP_ERROR.get() as u32) diff --git a/modules/tpcc/src/new_order.rs b/modules/tpcc/src/new_order.rs index ad636759fc7..1428459d124 100644 --- a/modules/tpcc/src/new_order.rs +++ b/modules/tpcc/src/new_order.rs @@ -265,7 +265,7 @@ fn reserve_remote_items( { match call_remote_function( ctx, - &spacetimedb_uri, + spacetimedb_uri, *remote_database_identity, "reserve_item_for_remote_order", ReserveItemInput { @@ -276,7 +276,7 @@ fn reserve_remote_items( Err(e) => { rollback_all_remote_item_reservations( ctx, - &spacetimedb_uri, + spacetimedb_uri, remote_database_items, remote_item_reservations, ); @@ -290,7 +290,7 @@ fn reserve_remote_items( Err(e) => { rollback_all_remote_item_reservations( ctx, - &spacetimedb_uri, + spacetimedb_uri, remote_database_items, remote_item_reservations, ); @@ -311,7 +311,7 @@ fn rollback_all_remote_item_reservations( remote_items: &[RemoteDatabaseItem], reservations: Vec, ) { - for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { + for (remote_item, reservation) in remote_items.iter().zip(reservations.into_iter()) { if let Err(e) = call_remote_function( ctx, spacetimedb_uri, @@ -330,7 +330,7 @@ fn confirm_all_remote_item_reservations( remote_items: &[RemoteDatabaseItem], reservations: Vec, ) { - for (remote_item, reservation) in remote_items.into_iter().zip(reservations.into_iter()) { + for (remote_item, reservation) in remote_items.iter().zip(reservations.into_iter()) { if let Err(e) = call_remote_function( ctx, spacetimedb_uri, diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md index 93a7106718e..bafcd5416f8 100644 --- a/tools/tpcc-runner/README.md +++ b/tools/tpcc-runner/README.md @@ -8,21 +8,131 @@ It supports three subcommands: - `driver`: run one benchmark driver with one logical terminal per SDK connection - `coordinator`: synchronize multiple remote drivers and aggregate their summaries +The runner assumes the TPC-C module is published to one or more databases named 
+`-`, for example `tpcc-0`, `tpcc-1`, `tpcc-2`. +Warehouses are assigned to databases in contiguous ranges: + +- database `0` owns warehouses `1..=warehouses_per_database` +- database `1` owns the next `warehouses_per_database` +- and so on + +`--warehouses` is the total logical warehouse count in the benchmark. +`--warehouse-start` and `--warehouse-count` define the warehouse slice owned by +one driver. The driver always uses exactly `10` terminals per owned warehouse. + +For multi-database runs, the `uri` passed to the loader and driver is also +stored in the module and used for cross-database HTTP calls. In normal builds, +that URI must be a non-private, routable address reachable from the database +host. `127.0.0.1`, `localhost`, and RFC1918 private IPs are rejected by the +module HTTP egress policy. + +For local single-machine development, you can opt into loopback HTTP by +building `spacetimedb-standalone` with: + +```bash +cargo build --release -p spacetimedb-standalone \ + --features spacetimedb-standalone/allow_loopback_http_for_tests +``` + +With that feature enabled, multi-database localhost runs can use +`http://127.0.0.1:3000`. This is intended for local testing, not a normal +production configuration. + ## Local workflow -1. Publish or start the `modules/tpcc` module. -2. Load data: +1. Build the release binaries you need. ```bash -cargo run -p tpcc-runner -- load --database tpcc --warehouses 1 +cargo build --release -p spacetimedb-cli -p spacetimedb-standalone -p tpcc-runner ``` -3. Run a single local driver: +2. Start a local SpacetimeDB server. ```bash -cargo run -p tpcc-runner -- driver --database tpcc --warehouses 1 --terminals 10 --warmup-secs 5 --measure-secs 30 +cargo run --release -p spacetimedb-cli -- start --listen-addr 127.0.0.1:3000 ``` +3. Publish the TPC-C module to one or more databases. 
For a single database: + +```bash +cargo run -p spacetimedb-cli -- publish \ + --server http://127.0.0.1:3000 \ + --module-path modules/tpcc \ + -c=always \ + -y \ + tpcc-0 +``` + +For two databases: + +```bash +cargo run -p spacetimedb-cli -- publish \ + --server http://127.0.0.1:3000 \ + --module-path modules/tpcc \ + -c=always \ + -y \ + tpcc-0 + +cargo run -p spacetimedb-cli -- publish \ + --server http://127.0.0.1:3000 \ + --module-path modules/tpcc \ + -c=always \ + -y \ + tpcc-1 +``` + +4. Load data. For one warehouse in one database: + +```bash +cargo run --release -p tpcc-runner -- load \ + --uri http://127.0.0.1:3000 \ + --database-prefix tpcc \ + --num-databases 1 \ + --warehouses-per-database 1 \ + --reset true +``` + +For two databases with one warehouse each on the same machine: + +```bash +cargo run --release -p tpcc-runner -- load \ + --uri http://127.0.0.1:3000 \ + --database-prefix tpcc \ + --num-databases 2 \ + --warehouses-per-database 1 \ + --reset true +``` + +5. Run a single local driver against one warehouse: + +```bash +cargo run --release -p tpcc-runner -- driver \ + --uri http://127.0.0.1:3000 \ + --database-prefix tpcc \ + --warehouses 1 \ + --warehouses-per-database 1 \ + --warmup-secs 5 \ + --measure-secs 30 +``` + +If you want to load multiple databases on one machine and actually exercise all +loaded warehouses, set `--warehouses` to the total logical warehouse count. For +example, after loading two databases with one warehouse each, a single-driver +run would be: + +```bash +cargo run --release -p tpcc-runner -- driver \ + --uri http://127.0.0.1:3000 \ + --database-prefix tpcc \ + --warehouses 2 \ + --warehouses-per-database 1 \ + --warmup-secs 5 \ + --measure-secs 30 +``` + +Using `--warehouses 1` after loading two one-warehouse databases will only +benchmark warehouse `1`; warehouse `2` will remain unused. + The driver writes: - `summary.json` @@ -32,19 +142,43 @@ under `tpcc-results///` unless `--output-dir` is provided. 
## Distributed workflow +To run multiple databases across machines, first publish `tpcc-0`, `tpcc-1`, +... and load them using a routable, non-private server URL, for example +`http://public-host:3000` or a public DNS name pointing at the SpacetimeDB +server. Build `tpcc-runner` in release mode on each driver machine before +running the commands below. + Start the coordinator: ```bash cargo run -p tpcc-runner -- coordinator --expected-drivers 2 --warmup-secs 5 --measure-secs 30 ``` -Start each remote driver with disjoint terminal ranges: +Start each remote driver with a disjoint warehouse slice. This example assumes +two databases with one warehouse each: ```bash -cargo run -p tpcc-runner -- driver --database tpcc --warehouses 2 --terminal-start 1 --terminals 10 --coordinator-url http://coordinator-host:7878 -cargo run -p tpcc-runner -- driver --database tpcc --warehouses 2 --terminal-start 11 --terminals 10 --coordinator-url http://coordinator-host:7878 +cargo run --release -p tpcc-runner -- driver \ + --uri http://public-server-host:3000 \ + --database-prefix tpcc \ + --warehouses 2 \ + --warehouses-per-database 1 \ + --warehouse-start 1 \ + --warehouse-count 1 \ + --coordinator-url http://coordinator-host:7878 + +cargo run --release -p tpcc-runner -- driver \ + --uri http://public-server-host:3000 \ + --database-prefix tpcc \ + --warehouses 2 \ + --warehouses-per-database 1 \ + --warehouse-start 2 \ + --warehouse-count 1 \ + --coordinator-url http://coordinator-host:7878 ``` +Those two drivers together cover warehouse `1` and warehouse `2`. + When all expected drivers register, the coordinator publishes a common schedule and writes an aggregated `summary.json` under `tpcc-results/coordinator//`. ## Config file @@ -54,20 +188,22 @@ All subcommands accept `--config `. 
The file is TOML with optional section ```toml [connection] uri = "http://127.0.0.1:3000" -database = "tpcc" +database_prefix = "tpcc" confirmed_reads = true timeout_secs = 30 [load] -warehouses = 1 +num_databases = 1 +warehouses_per_database = 1 batch_size = 500 reset = true [driver] driver_id = "driver-a" -terminal_start = 1 -terminals = 10 warehouses = 1 +warehouses_per_database = 1 +warehouse_start = 1 +warehouse_count = 1 warmup_secs = 5 measure_secs = 30 delivery_wait_secs = 60 @@ -90,6 +226,5 @@ CLI flags override config-file values. If the module signatures change, regenerate the Rust SDK bindings: ```bash -cargo build -p spacetimedb-standalone cargo run -p spacetimedb-cli -- generate --lang rust --out-dir tools/tpcc-runner/src/module_bindings --module-path modules/tpcc --yes ``` diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs index c20e715a69b..8cae12659e5 100644 --- a/tools/tpcc-runner/src/config.rs +++ b/tools/tpcc-runner/src/config.rs @@ -44,9 +44,10 @@ pub struct DriverConfig { pub connection: ConnectionConfig, pub run_id: Option, pub driver_id: String, - pub terminal_start: u32, - pub terminals: u32, pub warehouse_count: u16, + pub warehouse_start: u16, + pub driver_warehouse_count: u16, + pub warehouses_per_database: u16, pub warmup_secs: u64, pub measure_secs: u64, pub output_dir: Option, @@ -89,12 +90,14 @@ pub struct DriverArgs { #[arg(long)] pub driver_id: Option, #[arg(long)] - pub terminal_start: Option, - #[arg(long)] - pub terminals: Option, + pub warehouse_start: Option, + #[arg(long = "warehouse-count")] + pub driver_warehouse_count: Option, #[arg(long)] pub warehouses: Option, #[arg(long)] + pub warehouses_per_database: Option, + #[arg(long)] pub warmup_secs: Option, #[arg(long)] pub measure_secs: Option, @@ -173,9 +176,11 @@ struct FileLoadConfig { struct FileDriverConfig { run_id: Option, driver_id: Option, - terminal_start: Option, - terminals: Option, + warehouse_start: Option, + #[serde(rename = 
"warehouse_count")] + driver_warehouse_count: Option, warehouses: Option, + warehouses_per_database: Option, warmup_secs: Option, measure_secs: Option, output_dir: Option, @@ -244,13 +249,43 @@ impl DriverArgs { pub fn resolve(&self, file: &FileConfig) -> Result { let connection = self.connection.resolve(&file.connection); let warehouse_count = self.warehouses.or(file.driver.warehouses).unwrap_or(1); - let terminals = self - .terminals - .or(file.driver.terminals) - .unwrap_or(u32::from(warehouse_count) * 10); - let terminal_start = self.terminal_start.or(file.driver.terminal_start).unwrap_or(1); - if terminals == 0 { - bail!("terminal count must be positive"); + let warehouse_start = self.warehouse_start.or(file.driver.warehouse_start).unwrap_or(1); + if warehouse_start == 0 { + bail!("warehouse_start must be positive"); + } + if warehouse_start > warehouse_count { + bail!( + "warehouse_start {} exceeds total warehouses {}", + warehouse_start, + warehouse_count + ); + } + let remaining_warehouses = warehouse_count - warehouse_start + 1; + let driver_warehouse_count = self + .driver_warehouse_count + .or(file.driver.driver_warehouse_count) + .unwrap_or(remaining_warehouses); + if driver_warehouse_count == 0 { + bail!("warehouse_count must be positive"); + } + let warehouse_end = warehouse_start + .checked_add(driver_warehouse_count - 1) + .context("warehouse range overflowed")?; + if warehouse_end > warehouse_count { + bail!( + "warehouse range {}..={} exceeds total warehouses {}", + warehouse_start, + warehouse_end, + warehouse_count + ); + } + let warehouses_per_database = self + .warehouses_per_database + .or(file.driver.warehouses_per_database) + .or(file.load.warehouses_per_database) + .unwrap_or(warehouse_count); + if warehouses_per_database == 0 { + bail!("warehouses_per_database must be positive"); } Ok(DriverConfig { connection, @@ -260,9 +295,10 @@ impl DriverArgs { .clone() .or_else(|| file.driver.driver_id.clone()) .unwrap_or_else(default_driver_id), - 
terminal_start, - terminals, warehouse_count, + warehouse_start, + driver_warehouse_count, + warehouses_per_database, warmup_secs: self.warmup_secs.or(file.driver.warmup_secs).unwrap_or(5), measure_secs: self.measure_secs.or(file.driver.measure_secs).unwrap_or(30), output_dir: self.output_dir.clone().or_else(|| file.driver.output_dir.clone()), @@ -312,3 +348,17 @@ pub fn default_run_id() -> String { pub fn default_driver_id() -> String { format!("driver-{}", std::process::id()) } + +impl DriverConfig { + pub fn warehouse_end(&self) -> u16 { + self.warehouse_start + self.driver_warehouse_count - 1 + } + + pub fn terminal_start(&self) -> u32 { + (u32::from(self.warehouse_start) - 1) * u32::from(crate::tpcc::DISTRICTS_PER_WAREHOUSE) + 1 + } + + pub fn terminals(&self) -> u32 { + u32::from(self.driver_warehouse_count) * u32::from(crate::tpcc::DISTRICTS_PER_WAREHOUSE) + } +} diff --git a/tools/tpcc-runner/src/driver.rs b/tools/tpcc-runner/src/driver.rs index 0644d517f82..09d7723d391 100644 --- a/tools/tpcc-runner/src/driver.rs +++ b/tools/tpcc-runner/src/driver.rs @@ -14,6 +14,7 @@ use crate::protocol::{ RegisterDriverRequest, RegisterDriverResponse, RunSchedule, ScheduleResponse, SubmitSummaryRequest, }; use crate::summary::{write_json, DriverSummary, DriverSummaryMeta, SharedMetrics, TransactionKind, TransactionRecord}; +use crate::topology::DatabaseTopology; use crate::tpcc::*; struct TerminalRuntime { @@ -24,6 +25,7 @@ struct TerminalRuntime { schedule: RunSchedule, run_constants: RunConstants, assignment: TerminalAssignment, + database_identity: spacetimedb_sdk::Identity, seed: u64, } @@ -42,47 +44,51 @@ pub async fn run(config: DriverConfig) -> Result<()> { let run_id = schedule.run_id.clone(); let output_dir = resolve_output_dir(&config, &run_id); fs::create_dir_all(&output_dir).with_context(|| format!("failed to create {}", output_dir.display()))?; + let topology = DatabaseTopology::for_driver(&config).await?; + let used_database_numbers = 
databases_for_warehouse_slice(&config); + let database_summary = describe_databases(&topology, &used_database_numbers); let events_path = output_dir.join("txn_events.ndjson"); let summary_path = output_dir.join("summary.json"); let metrics = SharedMetrics::create(&run_id, &config.driver_id, &events_path)?; let run_constants = { - let mut rng = StdRng::seed_from_u64(schedule.measure_start_ms ^ u64::from(config.terminal_start)); + let mut rng = StdRng::seed_from_u64(schedule.measure_start_ms ^ u64::from(config.warehouse_start)); generate_run_constants(&mut rng) }; let abort = Arc::new(AtomicBool::new(false)); let request_ids = Arc::new(AtomicU64::new(1)); - let mut handles = Vec::with_capacity(config.terminals as usize); - - for offset in 0..config.terminals { - let terminal_id = config.terminal_start + offset; - let assignment = assign_terminal(terminal_id, config.warehouse_count).ok_or_else(|| { - anyhow!( - "terminal {} exceeds warehouse capacity {}", - terminal_id, - config.warehouse_count - ) - })?; - let terminal_seed = schedule.measure_start_ms ^ ((terminal_id as u64) << 32) ^ 0xabcdu64; - let terminal_config = config.clone(); - let terminal_metrics = metrics.clone(); - let terminal_abort = abort.clone(); - let terminal_constants = run_constants.clone(); - let terminal_schedule = schedule.clone(); - let terminal_request_ids = request_ids.clone(); - let runtime = TerminalRuntime { - config: terminal_config, - metrics: terminal_metrics, - abort: terminal_abort, - request_ids: terminal_request_ids, - schedule: terminal_schedule, - run_constants: terminal_constants, - assignment, - seed: terminal_seed, - }; - handles.push(thread::spawn(move || run_terminal(runtime))); + let mut handles = Vec::with_capacity(config.terminals() as usize); + + for warehouse_id in config.warehouse_start..=config.warehouse_end() { + let database_identity = topology.identity_for_warehouse(warehouse_id)?; + for district_id in 1..=DISTRICTS_PER_WAREHOUSE { + let assignment = 
TerminalAssignment { + terminal_id: terminal_id(warehouse_id, district_id), + warehouse_id, + district_id, + }; + let terminal_seed = schedule.measure_start_ms ^ ((assignment.terminal_id as u64) << 32) ^ 0xabcdu64; + let terminal_config = config.clone(); + let terminal_metrics = metrics.clone(); + let terminal_abort = abort.clone(); + let terminal_constants = run_constants.clone(); + let terminal_schedule = schedule.clone(); + let terminal_request_ids = request_ids.clone(); + let runtime = TerminalRuntime { + config: terminal_config, + metrics: terminal_metrics, + abort: terminal_abort, + request_ids: terminal_request_ids, + schedule: terminal_schedule, + run_constants: terminal_constants, + assignment, + database_identity, + seed: terminal_seed, + }; + handles.push(thread::spawn(move || run_terminal(runtime))); + } } let mut first_error: Option = None; @@ -107,15 +113,15 @@ pub async fn run(config: DriverConfig) -> Result<()> { return Err(err); } - harvest_delivery_completions(&config, &schedule, &metrics).await?; + harvest_delivery_completions(&config, &schedule, &metrics, &topology, &used_database_numbers).await?; let summary = metrics.finalize(DriverSummaryMeta { run_id: run_id.clone(), driver_id: config.driver_id.clone(), uri: config.connection.uri.clone(), - database: todo!("config.connection.database.clone()"), - terminal_start: config.terminal_start, - terminals: config.terminals, + database: database_summary, + terminal_start: config.terminal_start(), + terminals: config.terminals(), warehouse_count: config.warehouse_count, warmup_secs: config.warmup_secs, measure_secs: config.measure_secs, @@ -141,9 +147,10 @@ fn run_terminal(runtime: TerminalRuntime) -> Result<()> { schedule, run_constants, assignment, + database_identity, seed, } = runtime; - let client = ModuleClient::connect(&config.connection, todo!())?; + let client = ModuleClient::connect(&config.connection, database_identity)?; sleep_until_ms(schedule.warmup_start_ms); let mut rng = 
StdRng::seed_from_u64(seed); @@ -466,8 +473,8 @@ async fn resolve_schedule(config: &DriverConfig) -> Result { let client = reqwest::Client::new(); let register = RegisterDriverRequest { driver_id: config.driver_id.clone(), - terminal_start: config.terminal_start, - terminals: config.terminals, + terminal_start: config.terminal_start(), + terminals: config.terminals(), warehouse_count: config.warehouse_count, }; let response: RegisterDriverResponse = client @@ -519,43 +526,71 @@ async fn harvest_delivery_completions( config: &DriverConfig, schedule: &RunSchedule, metrics: &SharedMetrics, + topology: &DatabaseTopology, + used_database_numbers: &[u16], ) -> Result<()> { let expected = metrics.delivery_queued(); if expected == 0 { return Ok(()); } - let client = ModuleClient::connect(&config.connection, todo!())?; - let progress = expect_ok("delivery_progress", client.delivery_progress(schedule.run_id.clone()))?; + let clients = used_database_numbers + .iter() + .map(|database_number| { + let database_identity = topology.identity_for_database_number(*database_number)?; + ModuleClient::connect(&config.connection, database_identity) + .map(|client| (*database_number, client)) + .with_context(|| { + format!( + "failed to connect delivery harvester to {}", + topology.database_name(*database_number) + ) + }) + }) + .collect::>>()?; + + let mut pending_jobs = 0u64; + let mut completed_jobs = 0u64; + for (_, client) in &clients { + let progress = expect_ok("delivery_progress", client.delivery_progress(schedule.run_id.clone()))?; + pending_jobs += progress.pending_jobs; + completed_jobs += progress.completed_jobs; + } log::info!( "delivery progress before harvest: pending_jobs={} completed_jobs={}", - progress.pending_jobs, - progress.completed_jobs + pending_jobs, + completed_jobs ); let deadline = crate::summary::now_millis() + (config.delivery_wait_secs * 1_000); let mut seen_for_driver = 0u64; - let mut after_completion_id = 0u64; + let mut after_completion_ids = 
vec![0u64; clients.len()]; loop { if seen_for_driver >= expected { break; } - let batch = expect_ok( - "fetch_delivery_completions", - client.fetch_delivery_completions(schedule.run_id.clone(), after_completion_id, 512), - )?; - if batch.is_empty() { + let mut saw_rows = false; + for ((_, client), after_completion_id) in clients.iter().zip(after_completion_ids.iter_mut()) { + let batch = expect_ok( + "fetch_delivery_completions", + client.fetch_delivery_completions(schedule.run_id.clone(), *after_completion_id, 512), + )?; + if batch.is_empty() { + continue; + } + saw_rows = true; + for row in batch { + *after_completion_id = (*after_completion_id).max(row.completion_id); + if row.driver_id == config.driver_id { + seen_for_driver += 1; + metrics.record_delivery_completion(&row); + } + } + } + if !saw_rows { if crate::summary::now_millis() >= deadline { break; } tokio::time::sleep(Duration::from_millis(250)).await; - continue; - } - for row in batch { - after_completion_id = after_completion_id.max(row.completion_id); - if row.driver_id == config.driver_id { - seen_for_driver += 1; - metrics.record_delivery_completion(&row); - } } } @@ -568,10 +603,26 @@ async fn harvest_delivery_completions( ); } - client.shutdown(); + for (_, client) in clients { + client.shutdown(); + } Ok(()) } +fn databases_for_warehouse_slice(config: &DriverConfig) -> Vec { + let first = (config.warehouse_start - 1) / config.warehouses_per_database; + let last = (config.warehouse_end() - 1) / config.warehouses_per_database; + (first..=last).collect() +} + +fn describe_databases(topology: &DatabaseTopology, used_database_numbers: &[u16]) -> String { + used_database_numbers + .iter() + .map(|database_number| topology.database_name(*database_number)) + .collect::>() + .join(",") +} + async fn submit_summary(coordinator_url: &str, summary: DriverSummary) -> Result<()> { let client = reqwest::Client::new(); client diff --git a/tools/tpcc-runner/src/lib.rs b/tools/tpcc-runner/src/lib.rs new file 
mode 100644 index 00000000000..3139e6983b5 --- /dev/null +++ b/tools/tpcc-runner/src/lib.rs @@ -0,0 +1,27 @@ +mod client; +pub mod config; +pub mod coordinator; +pub mod driver; +pub mod loader; +mod module_bindings; +mod protocol; +pub mod summary; +mod topology; +mod tpcc; + +use config::{Cli, Command, FileConfig}; +use env_logger::Env; + +pub fn init_logging() { + let _ = env_logger::Builder::from_env(Env::default().default_filter_or("tpcc_runner=info")).try_init(); +} + +pub async fn run_cli(cli: Cli) -> anyhow::Result<()> { + let file_config = FileConfig::load(cli.config.as_deref())?; + + match cli.command { + Command::Load(args) => loader::run(args.resolve(&file_config)).await, + Command::Driver(args) => driver::run(args.resolve(&file_config)?).await, + Command::Coordinator(args) => coordinator::run(args.resolve(&file_config)?).await, + } +} diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index d8c9ecd27b8..8d21c1c11f3 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -1,12 +1,12 @@ use anyhow::{Context, Result}; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use spacetimedb_sdk::Identity; use std::ops::Range; use std::time::SystemTime; use crate::client::ModuleClient; use crate::config::LoadConfig; use crate::module_bindings::*; +use crate::topology::DatabaseTopology; use crate::tpcc::*; use spacetimedb_sdk::Timestamp; @@ -17,17 +17,17 @@ const CUSTOMER_INITIAL_BALANCE_CENTS: i64 = -1_000; const CUSTOMER_INITIAL_YTD_PAYMENT_CENTS: i64 = 1_000; const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; -pub fn run(config: LoadConfig) -> Result<()> { +pub async fn run(config: LoadConfig) -> Result<()> { log::info!( "Loading tpcc dataset into {} databases, all running on {}", config.num_databases, config.connection.uri ); - let database_identities = lookup_database_identities(&config)?; + let topology = DatabaseTopology::for_load(&config).await?; for database_number in 
0..config.num_databases { - configure_one_database(&config, database_number, &database_identities)?; + configure_one_database(&config, database_number, &topology)?; } log::info!("tpcc load finished"); @@ -35,37 +35,8 @@ pub fn run(config: LoadConfig) -> Result<()> { Ok(()) } -fn lookup_database_identities(config: &LoadConfig) -> Result> { - (0..config.num_databases) - .map(|database_number| { - let body = reqwest::blocking::get(format!( - "{}/v1/database/{}-{}", - config.connection.uri, config.connection.database_prefix, database_number - ))?; - let obj = match body.json::()? { - serde_json::Value::Object(obj) => obj, - els => anyhow::bail!("Expected an object but got {els:?}"), - }; - let Some(db_ident) = obj.get("database_identity") else { - anyhow::bail!("Expected a `database_identity` property but saw none in {obj:?}") - }; - let serde_json::Value::Object(ident_obj) = db_ident else { - anyhow::bail!("Expected an object but got {db_ident:?}") - }; - let Some(ident_str) = ident_obj.get("__identity__") else { - anyhow::bail!("Expected a `__identity__` property but saw none in {ident_obj:?}") - }; - let serde_json::Value::String(ident_str) = ident_str else { - anyhow::bail!("Expected a string but got {ident_str:?}") - }; - let ident = Identity::from_hex(ident_str)?; - Ok(ident) - }) - .collect() -} - -fn configure_one_database(config: &LoadConfig, database_number: u16, database_identities: &[Identity]) -> Result<()> { - let database = database_identities[database_number as usize]; +fn configure_one_database(config: &LoadConfig, database_number: u16, topology: &DatabaseTopology) -> Result<()> { + let database = topology.identity_for_database_number(database_number)?; log::info!( "loading tpcc dataset into {} / {} with {} warehouse(s)", config.connection.uri, @@ -90,7 +61,7 @@ fn configure_one_database(config: &LoadConfig, database_number: u16, database_id config.num_databases, config.warehouses_per_database, config.batch_size, - database_identities, + topology, )?; 
load_items(&client, config.batch_size, &mut rng)?; load_warehouses_and_districts( @@ -156,7 +127,7 @@ fn load_remote_warehouses( num_databases: u16, warehouses_per_database: u16, batch_size: usize, - database_identities: &[Identity], + topology: &DatabaseTopology, ) -> Result<()> { let mut warehouse_batch = Vec::with_capacity(batch_size); @@ -164,7 +135,7 @@ fn load_remote_warehouses( if other_database_number == database_number { continue; } - let other_database_ident = database_identities[other_database_number as usize]; + let other_database_ident = topology.identity_for_database_number(other_database_number)?; for w_id in warehouses_range(other_database_number, warehouses_per_database) { warehouse_batch.push(RemoteWarehouse { diff --git a/tools/tpcc-runner/src/main.rs b/tools/tpcc-runner/src/main.rs index 6fd29df23d4..c347aa2d387 100644 --- a/tools/tpcc-runner/src/main.rs +++ b/tools/tpcc-runner/src/main.rs @@ -1,27 +1,8 @@ -mod client; -mod config; -mod coordinator; -mod driver; -mod loader; -mod module_bindings; -mod protocol; -mod summary; -mod tpcc; - use clap::Parser; -use config::{Cli, Command, FileConfig}; -use env_logger::Env; +use tpcc_runner::{config::Cli, init_logging, run_cli}; #[tokio::main] async fn main() -> anyhow::Result<()> { - env_logger::Builder::from_env(Env::default().default_filter_or("tpcc_runner=info")).init(); - - let cli = Cli::parse(); - let file_config = FileConfig::load(cli.config.as_deref())?; - - match cli.command { - Command::Load(args) => loader::run(args.resolve(&file_config)), - Command::Driver(args) => driver::run(args.resolve(&file_config)?).await, - Command::Coordinator(args) => coordinator::run(args.resolve(&file_config)?).await, - } + init_logging(); + run_cli(Cli::parse()).await } diff --git a/tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs new file mode 100644 index 00000000000..f54aa582640 --- /dev/null +++ 
b/tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::remote_warehouse_type::RemoteWarehouse; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadRemoteWarehousesArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadRemoteWarehousesArgs) -> Self { + Self::LoadRemoteWarehouses { rows: args.rows } + } +} + +impl __sdk::InModule for LoadRemoteWarehousesArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_remote_warehouses`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_remote_warehouses { + /// Request that the remote module invoke the reducer `load_remote_warehouses` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_remote_warehouses:load_remote_warehouses_then`] to run a callback after the reducer completes. + fn load_remote_warehouses(&self, rows: Vec) -> __sdk::Result<()> { + self.load_remote_warehouses_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_remote_warehouses` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_remote_warehouses_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_remote_warehouses for super::RemoteReducers { + fn load_remote_warehouses_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadRemoteWarehousesArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/mod.rs b/tools/tpcc-runner/src/module_bindings/mod.rs index 74ce5271415..a3d2e2bbdd7 100644 --- a/tools/tpcc-runner/src/module_bindings/mod.rs +++ b/tools/tpcc-runner/src/module_bindings/mod.rs @@ -1,7 +1,7 @@ // THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE // WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. -// This was generated using spacetimedb cli version 2.1.0 (commit 1a74fe9ea4120e1cbc7004d5cb42a443034e238f-dirty). +// This was generated using spacetimedb cli version 2.1.0 (commit 0a24939b80db8d4a743625fb64c7c333eca35479). #![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; diff --git a/tools/tpcc-runner/src/module_bindings/payment_request_type.rs b/tools/tpcc-runner/src/module_bindings/payment_request_type.rs new file mode 100644 index 00000000000..b583fced851 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/payment_request_type.rs @@ -0,0 +1,23 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_selector_type::CustomerSelector; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct PaymentRequest { + pub terminal_warehouse_id: u16, + pub terminal_district_id: u8, + pub customer_warehouse_id: u16, + pub customer_district_id: u8, + pub customer_selector: CustomerSelector, + pub payment_amount_cents: i64, + pub now: __sdk::Timestamp, +} + +impl __sdk::InModule for PaymentRequest { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/process_remote_payment_procedure.rs b/tools/tpcc-runner/src/module_bindings/process_remote_payment_procedure.rs new file mode 100644 index 00000000000..078e18b2107 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/process_remote_payment_procedure.rs @@ -0,0 +1,54 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_type::Customer; +use super::payment_request_type::PaymentRequest; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct ProcessRemotePaymentArgs { + pub request: PaymentRequest, +} + +impl __sdk::InModule for ProcessRemotePaymentArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `process_remote_payment`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait process_remote_payment { + fn process_remote_payment(&self, request: PaymentRequest) { + self.process_remote_payment_then(request, |_, _| {}); + } + + fn process_remote_payment_then( + &self, + request: PaymentRequest, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl process_remote_payment for super::RemoteProcedures { + fn process_remote_payment_then( + &self, + request: PaymentRequest, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp.invoke_procedure_with_callback::<_, Result>( + "process_remote_payment", + ProcessRemotePaymentArgs { request }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/remote_warehouse_type.rs b/tools/tpcc-runner/src/module_bindings/remote_warehouse_type.rs new file mode 100644 index 00000000000..43f99910666 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/remote_warehouse_type.rs @@ -0,0 +1,52 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct RemoteWarehouse { + pub w_id: u16, + pub remote_database_home: __sdk::Identity, +} + +impl __sdk::InModule for RemoteWarehouse { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `RemoteWarehouse`. +/// +/// Provides typed access to columns for query building. 
+pub struct RemoteWarehouseCols { + pub w_id: __sdk::__query_builder::Col, + pub remote_database_home: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for RemoteWarehouse { + type Cols = RemoteWarehouseCols; + fn cols(table_name: &'static str) -> Self::Cols { + RemoteWarehouseCols { + w_id: __sdk::__query_builder::Col::new(table_name, "w_id"), + remote_database_home: __sdk::__query_builder::Col::new(table_name, "remote_database_home"), + } + } +} + +/// Indexed column accessor struct for the table `RemoteWarehouse`. +/// +/// Provides typed access to indexed columns for query building. +pub struct RemoteWarehouseIxCols { + pub w_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for RemoteWarehouse { + type IxCols = RemoteWarehouseIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + RemoteWarehouseIxCols { + w_id: __sdk::__query_builder::IxCol::new(table_name, "w_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for RemoteWarehouse {} diff --git a/tools/tpcc-runner/src/module_bindings/test_procedure.rs b/tools/tpcc-runner/src/module_bindings/test_procedure.rs new file mode 100644 index 00000000000..655a46e173e --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/test_procedure.rs @@ -0,0 +1,44 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct TestArgs {} + +impl __sdk::InModule for TestArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `test`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait test { + fn test(&self) { + self.test_then(|_, _| {}); + } + + fn test_then( + &self, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl test for super::RemoteProcedures { + fn test_then( + &self, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>("test", TestArgs {}, __callback); + } +} diff --git a/tools/tpcc-runner/src/topology.rs b/tools/tpcc-runner/src/topology.rs new file mode 100644 index 00000000000..5b0fd5636a3 --- /dev/null +++ b/tools/tpcc-runner/src/topology.rs @@ -0,0 +1,105 @@ +use anyhow::{bail, Result}; +use reqwest::Client; +use spacetimedb_sdk::Identity; + +use crate::config::{ConnectionConfig, DriverConfig, LoadConfig}; + +#[derive(Clone, Debug)] +pub struct DatabaseTopology { + database_prefix: String, + warehouses_per_database: u16, + identities: Vec, +} + +impl DatabaseTopology { + pub async fn for_load(config: &LoadConfig) -> Result { + ensure_warehouses_per_database(config.warehouses_per_database)?; + Ok(Self { + database_prefix: config.connection.database_prefix.clone(), + warehouses_per_database: config.warehouses_per_database, + identities: lookup_database_identities(&config.connection, config.num_databases).await?, + }) + } + + pub async fn for_driver(config: &DriverConfig) -> Result { + ensure_warehouses_per_database(config.warehouses_per_database)?; + Ok(Self { + database_prefix: config.connection.database_prefix.clone(), + warehouses_per_database: config.warehouses_per_database, + identities: lookup_database_identities( + &config.connection, + required_database_count(config.warehouse_count, config.warehouses_per_database), + ) + .await?, + }) + } + + pub fn database_name(&self, database_number: u16) -> String { + format!("{}-{}", self.database_prefix, database_number) + } + + pub fn identity_for_database_number(&self, 
database_number: u16) -> Result<Identity> { + self.identities.get(database_number as usize).copied().ok_or_else(|| { + anyhow::anyhow!( + "missing database identity for database {}", + self.database_name(database_number) + ) + }) + } + + pub fn database_number_for_warehouse(&self, warehouse_id: u16) -> Result<u16> { + if warehouse_id == 0 { + bail!("warehouse id must be positive"); + } + Ok((warehouse_id - 1) / self.warehouses_per_database) + } + + pub fn identity_for_warehouse(&self, warehouse_id: u16) -> Result<Identity> { + let database_number = self.database_number_for_warehouse(warehouse_id)?; + self.identity_for_database_number(database_number) + } +} + +pub fn required_database_count(warehouse_count: u16, warehouses_per_database: u16) -> u16 { + warehouse_count.div_ceil(warehouses_per_database) +} + +pub async fn lookup_database_identities(connection: &ConnectionConfig, num_databases: u16) -> Result<Vec<Identity>> { + let client = Client::new(); + let mut identities = Vec::with_capacity(num_databases as usize); + for database_number in 0..num_databases { + let body = client + .get(format!( + "{}/v1/database/{}-{}", + connection.uri, connection.database_prefix, database_number + )) + .send() + .await? + .error_for_status()?; + let obj = match body.json::<serde_json::Value>().await?
{ + serde_json::Value::Object(obj) => obj, + els => bail!("expected object while resolving database identity, got {els:?}"), + }; + let Some(db_ident) = obj.get("database_identity") else { + bail!("missing database_identity in response {obj:?}"); + }; + let serde_json::Value::Object(ident_obj) = db_ident else { + bail!("expected database_identity object, got {db_ident:?}"); + }; + let Some(ident_str) = ident_obj.get("__identity__") else { + bail!("missing __identity__ in response {ident_obj:?}"); + }; + let serde_json::Value::String(ident_str) = ident_str else { + bail!("expected __identity__ string, got {ident_str:?}"); + }; + identities.push(Identity::from_hex(ident_str)?); + } + Ok(identities) +} + +fn ensure_warehouses_per_database(warehouses_per_database: u16) -> Result<()> { + if warehouses_per_database == 0 { + bail!("warehouses_per_database must be positive"); + } + Ok(()) +} diff --git a/tools/tpcc-runner/src/tpcc.rs b/tools/tpcc-runner/src/tpcc.rs index 3e4ba0f587e..0b2658bcaf8 100644 --- a/tools/tpcc-runner/src/tpcc.rs +++ b/tools/tpcc-runner/src/tpcc.rs @@ -27,18 +27,10 @@ pub struct TerminalAssignment { pub district_id: u8, } -pub fn assign_terminal(terminal_id: u32, warehouse_count: u16) -> Option { - let zero = terminal_id.checked_sub(1)?; - let warehouse_zero = zero / u32::from(DISTRICTS_PER_WAREHOUSE); - if warehouse_zero >= u32::from(warehouse_count) { - return None; - } - let district_zero = zero % u32::from(DISTRICTS_PER_WAREHOUSE); - Some(TerminalAssignment { - terminal_id, - warehouse_id: (warehouse_zero + 1) as u16, - district_id: (district_zero + 1) as u8, - }) +pub fn terminal_id(warehouse_id: u16, district_id: u8) -> u32 { + debug_assert!(warehouse_id > 0); + debug_assert!((1..=DISTRICTS_PER_WAREHOUSE).contains(&district_id)); + (u32::from(warehouse_id) - 1) * u32::from(DISTRICTS_PER_WAREHOUSE) + u32::from(district_id) } pub fn choose_transaction(rng: &mut R) -> TransactionKind { From 24b4e62ec92640070aca39a6c5ded9cf50a1717a Mon Sep 17 
00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 11:44:20 +0530 Subject: [PATCH 11/38] unreviewed code --- crates/bindings-sys/src/lib.rs | 35 +++++++ crates/bindings/src/lib.rs | 1 + crates/bindings/src/remote_reducer.rs | 52 ++++++++++ crates/core/src/host/host_controller.rs | 17 +++- crates/core/src/host/instance_env.rs | 25 +++-- crates/core/src/host/mod.rs | 1 + crates/core/src/host/reducer_router.rs | 61 ++++++++++++ crates/core/src/replica_context.rs | 6 ++ .../tests/smoketests/cross_db_reducer.rs | 95 +++++++++++++++++++ crates/smoketests/tests/smoketests/mod.rs | 1 + 10 files changed, 285 insertions(+), 9 deletions(-) create mode 100644 crates/bindings/src/remote_reducer.rs create mode 100644 crates/core/src/host/reducer_router.rs create mode 100644 crates/smoketests/tests/smoketests/cross_db_reducer.rs diff --git a/crates/bindings-sys/src/lib.rs b/crates/bindings-sys/src/lib.rs index 79835e120a3..c9fb1849fb6 100644 --- a/crates/bindings-sys/src/lib.rs +++ b/crates/bindings-sys/src/lib.rs @@ -1471,6 +1471,41 @@ pub fn identity() -> [u8; 32] { buf } +/// Call a reducer on a remote database identified by `identity` (little-endian 32-byte array). +/// +/// On transport success (any HTTP response received): +/// - Returns `Ok((status, body_source))` where `status` is the HTTP status code and +/// `body_source` is a [`raw::BytesSource`] containing the raw response body bytes. +/// +/// On transport failure (connection refused, timeout, etc.): +/// - Returns `Err(err_source)` where `err_source` is a [`raw::BytesSource`] containing +/// a BSATN-encoded error [`String`]. +/// +/// Unlike HTTP requests, this syscall may be called while a transaction is open. 
+#[inline] +pub fn call_reducer_on_db( + identity: [u8; 32], + reducer_name: &str, + args: &[u8], +) -> Result<(u16, raw::BytesSource), raw::BytesSource> { + let mut out = raw::BytesSource::INVALID; + let status = unsafe { + raw::call_reducer_on_db( + identity.as_ptr(), + reducer_name.as_ptr(), + reducer_name.len() as u32, + args.as_ptr(), + args.len() as u32, + &mut out, + ) + }; + match Errno::from_code(status) { + None => Ok((status, out)), + Some(errno) if errno == Errno::HTTP_ERROR => Err(out), + Some(errno) => panic!("call_reducer_on_db: unexpected errno {errno}"), + } +} + /// Finds the JWT payload associated with `connection_id`. /// If nothing is found for the connection, this returns None. /// If a payload is found, this will return a valid [`raw::BytesSource`]. diff --git a/crates/bindings/src/lib.rs b/crates/bindings/src/lib.rs index 9e02a3a97f0..f91151ffb88 100644 --- a/crates/bindings/src/lib.rs +++ b/crates/bindings/src/lib.rs @@ -12,6 +12,7 @@ mod client_visibility_filter; pub mod http; pub mod log_stopwatch; mod logger; +pub mod remote_reducer; #[cfg(feature = "rand08")] mod rng; #[doc(hidden)] diff --git a/crates/bindings/src/remote_reducer.rs b/crates/bindings/src/remote_reducer.rs new file mode 100644 index 00000000000..80a71d3613f --- /dev/null +++ b/crates/bindings/src/remote_reducer.rs @@ -0,0 +1,52 @@ +//! Naive binding for calling reducers on remote SpacetimeDB databases. +//! +//! Call a reducer on another database using [`call_reducer_on_db`]. +//! +//! The args must be BSATN-encoded. The response body is raw bytes returned by +//! the remote database's HTTP handler. An HTTP status >= 400 does not cause an +//! `Err` return; only a transport failure (connection refused, timeout, …) does. +//! +//! # Example +//! +//! ```no_run +//! use spacetimedb::{remote_reducer, Identity}; +//! +//! #[spacetimedb::reducer] +//! fn call_remote(ctx: &spacetimedb::ReducerContext, target: Identity) { +//! // Empty BSATN args for a zero-argument reducer. 
+//! let args = spacetimedb::bsatn::to_vec(&()).unwrap(); +//! match remote_reducer::call_reducer_on_db(target, "my_reducer", &args) { +//! Ok((status, body)) => log::info!("status={status} body={body:?}"), +//! Err(msg) => log::error!("transport error: {msg}"), +//! } +//! } +//! ``` + +use crate::{rt::{read_bytes_source_as, read_bytes_source_into}, IterBuf, Identity}; + +/// Call a reducer on a remote database. +/// +/// - `database_identity`: the target database. +/// - `reducer_name`: the name of the reducer to invoke (must be valid UTF-8). +/// - `args`: BSATN-encoded reducer arguments. +/// +/// Returns `Ok((status, body))` on any transport success (including HTTP errors like 400/500). +/// Returns `Err(message)` on transport failure (connection refused, timeout, …). +pub fn call_reducer_on_db( + database_identity: Identity, + reducer_name: &str, + args: &[u8], +) -> Result<(u16, Vec), String> { + let identity_bytes = database_identity.to_byte_array(); + match spacetimedb_bindings_sys::call_reducer_on_db(identity_bytes, reducer_name, args) { + Ok((status, body_source)) => { + let mut buf = IterBuf::take(); + read_bytes_source_into(body_source, &mut buf); + Ok((status, buf.to_vec())) + } + Err(err_source) => { + let message = read_bytes_source_as::(err_source); + Err(message) + } + } +} diff --git a/crates/core/src/host/host_controller.rs b/crates/core/src/host/host_controller.rs index a813a1cb426..a2a18606614 100644 --- a/crates/core/src/host/host_controller.rs +++ b/crates/core/src/host/host_controller.rs @@ -13,6 +13,7 @@ use crate::host::v8::V8Runtime; use crate::host::ProcedureCallError; use crate::messages::control_db::{Database, HostType}; use crate::module_host_context::ModuleCreationContext; +use crate::host::reducer_router::{LocalReducerRouter, ReducerCallRouter}; use crate::replica_context::{CallReducerOnDbConfig, ReplicaContext}; use crate::subscription::module_subscription_actor::ModuleSubscriptions; use 
crate::subscription::module_subscription_manager::{spawn_send_worker, SubscriptionManager, TransactionOffset}; @@ -122,6 +123,11 @@ pub struct HostController { /// /// All per-replica clones share the same underlying connection pool. pub call_reducer_client: reqwest::Client, + /// Router that resolves the HTTP base URL of the leader node for a given database. + /// + /// Set to [`LocalReducerRouter`] by default; replaced with `ClusterReducerRouter` + /// in cluster deployments via [`HostController::new`] receiving the router directly. + pub call_reducer_router: Arc, } pub(crate) struct HostRuntimes { @@ -234,6 +240,7 @@ impl HostController { bsatn_rlb_pool: BsatnRowListBuilderPool::new(), db_cores, call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), + call_reducer_router: Arc::new(LocalReducerRouter::new("http://127.0.0.1:3000")), } } @@ -671,6 +678,7 @@ async fn make_replica_ctx( relational_db: Arc, bsatn_rlb_pool: BsatnRowListBuilderPool, call_reducer_client: reqwest::Client, + call_reducer_router: Arc, ) -> anyhow::Result { let logger = match module_logs { Some(path) => asyncify(move || Arc::new(DatabaseLogger::open_today(path))).await, @@ -704,6 +712,7 @@ async fn make_replica_ctx( logger, subscriptions, call_reducer_client, + call_reducer_router, }) } @@ -780,6 +789,7 @@ struct ModuleLauncher { core: AllocatedJobCore, bsatn_rlb_pool: BsatnRowListBuilderPool, call_reducer_client: reqwest::Client, + call_reducer_router: Arc, } impl ModuleLauncher { @@ -800,6 +810,7 @@ impl ModuleLauncher { self.relational_db, self.bsatn_rlb_pool, self.call_reducer_client, + self.call_reducer_router, ) .await .map(Arc::new)?; @@ -1002,6 +1013,7 @@ impl Host { core: host_controller.db_cores.take(), bsatn_rlb_pool: bsatn_rlb_pool.clone(), call_reducer_client: host_controller.call_reducer_client.clone(), + call_reducer_router: host_controller.call_reducer_router.clone(), } .launch_module() .await? 
@@ -1032,6 +1044,7 @@ impl Host { core: host_controller.db_cores.take(), bsatn_rlb_pool: bsatn_rlb_pool.clone(), call_reducer_client: host_controller.call_reducer_client.clone(), + call_reducer_router: host_controller.call_reducer_router.clone(), } .launch_module() .await; @@ -1056,6 +1069,7 @@ impl Host { core: host_controller.db_cores.take(), bsatn_rlb_pool: bsatn_rlb_pool.clone(), call_reducer_client: host_controller.call_reducer_client.clone(), + call_reducer_router: host_controller.call_reducer_router.clone(), } .launch_module() .await; @@ -1163,8 +1177,9 @@ impl Host { runtimes: runtimes.clone(), core, bsatn_rlb_pool, - // Transient validation-only module; build its own client with defaults. + // Transient validation-only module; build its own client and router with defaults. call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), + call_reducer_router: Arc::new(LocalReducerRouter::new("http://127.0.0.1:3000")), } .launch_module() .await diff --git a/crates/core/src/host/instance_env.rs b/crates/core/src/host/instance_env.rs index 641e1692485..f778621e470 100644 --- a/crates/core/src/host/instance_env.rs +++ b/crates/core/src/host/instance_env.rs @@ -983,8 +983,9 @@ impl InstanceEnv { /// Unlike [`Self::http_request`], this is explicitly allowed while a transaction is open — /// the caller is responsible for understanding the consistency implications. /// - /// Uses the warmed HTTP/2 client stored in [`ReplicaContext::call_reducer_client`], - /// configured when the replica was constructed. + /// Uses [`ReplicaContext::call_reducer_router`] to resolve the leader node for + /// `database_identity`, then sends the request via the warmed HTTP/2 client in + /// [`ReplicaContext::call_reducer_client`]. /// /// Returns `(http_status, response_body)` on transport success, /// or [`NodesError::HttpError`] if the connection itself fails. 
@@ -995,13 +996,20 @@ impl InstanceEnv { args: bytes::Bytes, ) -> impl Future> + use<> { let client = self.replica_ctx.call_reducer_client.clone(); - let url = format!( - "http://localhost/v1/database/{}/call/{}", - database_identity.to_hex(), - reducer_name, - ); + let router = self.replica_ctx.call_reducer_router.clone(); + let reducer_name = reducer_name.to_owned(); async move { + let base_url = router + .resolve_base_url(database_identity) + .await + .map_err(|e| NodesError::HttpError(e.to_string()))?; + let url = format!( + "{}/v1/database/{}/call/{}", + base_url, + database_identity.to_hex(), + reducer_name, + ); let response = client .post(&url) .header(http::header::CONTENT_TYPE, "application/octet-stream") @@ -1357,7 +1365,7 @@ mod test { tests_utils::{begin_mut_tx, with_auto_commit, with_read_only, TestDB}, RelationalDB, }, - host::Scheduler, + host::{reducer_router::LocalReducerRouter, Scheduler}, messages::control_db::{Database, HostType}, replica_context::{CallReducerOnDbConfig, ReplicaContext}, subscription::module_subscription_actor::ModuleSubscriptions, @@ -1394,6 +1402,7 @@ mod test { logger, subscriptions: subs, call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), + call_reducer_router: Arc::new(LocalReducerRouter::new("http://127.0.0.1:3000")), }, runtime, )) diff --git a/crates/core/src/host/mod.rs b/crates/core/src/host/mod.rs index 3123913fcf6..25e56ca217e 100644 --- a/crates/core/src/host/mod.rs +++ b/crates/core/src/host/mod.rs @@ -20,6 +20,7 @@ pub mod wasmtime; // Visible for integration testing. 
pub mod instance_env +pub mod reducer_router; pub mod v8; // only pub for testing mod wasm_common diff --git a/crates/core/src/host/reducer_router.rs b/crates/core/src/host/reducer_router.rs new file mode 100644 index 00000000000..93891ecc6e6 --- /dev/null +++ b/crates/core/src/host/reducer_router.rs @@ -0,0 +1,61 @@ +//! Trait for resolving which node to contact when calling a reducer on another database. +//! +//! Implementations differ between deployment modes: +//! +//! - **Standalone / single-node** — [`LocalReducerRouter`] always returns the local node's +//! HTTP base URL. Every database is on the same node, so there is nothing to resolve. +//! +//! - **Cluster / multi-node** — `ClusterReducerRouter` (private crate) queries the control DB +//! to discover the leader replica's node and returns that node's advertise address. +//! +//! The trait is object-safe (futures are boxed) so it can be stored as `Arc<dyn ReducerCallRouter>` +//! in [`crate::replica_context::ReplicaContext`] and swapped at startup. +use spacetimedb_lib::Identity; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; + +pub type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>; + +pub trait ReducerCallRouter: Send + Sync + 'static { + /// Return the HTTP base URL (e.g. `"http://10.0.0.5:3000"`) of the node that + /// is the leader for `database_identity`. + /// + /// The caller appends `/v1/database/{identity}/call/{reducer}` to produce the full URL. + /// + /// # Errors + /// + /// Returns an error string when the leader cannot be resolved + /// (database not found, no leader elected yet, node has no network address, etc.). + fn resolve_base_url<'a>(&'a self, database_identity: Identity) -> BoxFuture<'a, anyhow::Result<String>>; +} + +// Arc<R> is itself a ReducerCallRouter. +impl<R: ReducerCallRouter + ?Sized> ReducerCallRouter for Arc<R> { + fn resolve_base_url<'a>(&'a self, database_identity: Identity) -> BoxFuture<'a, anyhow::Result<String>> { + (**self).resolve_base_url(database_identity) + } +} + +/// Single-node implementation of [`ReducerCallRouter`].
+/// +/// Always routes to the same fixed base URL regardless of which database is targeted. +/// Suitable for standalone (single-node) deployments where every database is local. +/// +/// For cluster deployments, replace this with `ClusterReducerRouter` from the private crate. +pub struct LocalReducerRouter { + pub base_url: String, +} + +impl LocalReducerRouter { + pub fn new(base_url: impl Into<String>) -> Self { + Self { base_url: base_url.into() } + } +} + +impl ReducerCallRouter for LocalReducerRouter { + fn resolve_base_url<'a>(&'a self, _database_identity: Identity) -> BoxFuture<'a, anyhow::Result<String>> { + let url = self.base_url.clone(); + Box::pin(async move { Ok(url) }) + } +} diff --git a/crates/core/src/replica_context.rs b/crates/core/src/replica_context.rs index ede7fa44cdd..99208940d3a 100644 --- a/crates/core/src/replica_context.rs +++ b/crates/core/src/replica_context.rs @@ -3,6 +3,7 @@ use spacetimedb_commitlog::SizeOnDisk; use super::database_logger::DatabaseLogger; use crate::db::relational_db::RelationalDB; use crate::error::DBError; +use crate::host::reducer_router::ReducerCallRouter; use crate::messages::control_db::{Database, HostType}; use crate::subscription::module_subscription_actor::ModuleSubscriptions; use std::io; @@ -50,6 +51,7 @@ pub struct ReplicaContext { /// /// `reqwest::Client` is internally an `Arc`, so cloning `ReplicaContext` shares the pool. pub call_reducer_client: reqwest::Client, + /// Resolves the HTTP base URL of the leader node for a given database identity. + /// + /// - Standalone: always returns the local node URL ([`crate::host::reducer_router::LocalReducerRouter`]). + /// - Cluster: queries the control DB to find the leader replica's node.
+ pub call_reducer_router: Arc, } impl ReplicaContext { diff --git a/crates/smoketests/tests/smoketests/cross_db_reducer.rs b/crates/smoketests/tests/smoketests/cross_db_reducer.rs new file mode 100644 index 00000000000..9fcd7f3f563 --- /dev/null +++ b/crates/smoketests/tests/smoketests/cross_db_reducer.rs @@ -0,0 +1,95 @@ +use spacetimedb_smoketests::Smoketest; + +/// Module code used for both the "receiver" and "caller" databases. +/// +/// - `record_ping(message)` is called by the caller via `call_reducer_on_db` and stores the +/// message in `ping_log`. +/// - `call_remote(target, message)` is the entry point: it BSATN-encodes `message` and invokes +/// `record_ping` on `target` over the cross-DB ABI. +const MODULE_CODE: &str = r#" +use spacetimedb::{log, ReducerContext, Table, Identity}; + +#[spacetimedb::table(name = ping_log, public)] +pub struct PingLog { + #[primary_key] + #[auto_inc] + id: u64, + message: String, +} + +/// Writes one row to `ping_log` with the given message. Called via the cross-DB ABI. +#[spacetimedb::reducer] +pub fn record_ping(ctx: &ReducerContext, message: String) { + log::info!("record_ping: got message: {}", message); + ctx.db.ping_log().insert(PingLog { id: 0, message }); +} + +/// Calls `record_ping(message)` on `target` via the cross-database ABI. +/// +/// Args are BSATN-encoded as a 1-tuple `(message,)` — the same layout the host-side +/// `invoke_reducer` expects when decoding a single-`String` reducer. 
+#[spacetimedb::reducer] +pub fn call_remote(ctx: &ReducerContext, target: Identity, message: String) { + let args = spacetimedb::bsatn::to_vec(&(message,)).expect("failed to encode args"); + match spacetimedb::remote_reducer::call_reducer_on_db(target, "record_ping", &args) { + Ok((status, _body)) => { + log::info!("call_remote: got HTTP status {}", status); + } + Err(err) => { + log::error!("call_remote: transport failure: {}", err); + panic!("call_reducer_on_db transport failure: {err}"); + } + } +} +"#; + +/// Smoke test for the cross-database reducer call ABI. +/// +/// Publishes the same module twice on one server, then calls `call_remote` on the +/// "caller" database with the "receiver" database's identity as an argument. +/// Verifies that `receiver` has a new row in `ping_log` written by the cross-DB call. +#[test] +fn test_cross_db_reducer_call() { + let pid = std::process::id(); + let receiver_name = format!("cross-db-receiver-{pid}"); + let caller_name = format!("cross-db-caller-{pid}"); + + // Build one server with the shared module code. + let mut test = Smoketest::builder() + .module_code(MODULE_CODE) + .autopublish(false) + .build(); + + // Publish the receiver database first. + test.publish_module_named(&receiver_name, false) + .expect("failed to publish receiver module"); + let receiver_identity = test + .database_identity + .clone() + .expect("receiver database_identity not set after publish"); + + // Publish the caller database (same code, different name). + test.publish_module_named(&caller_name, false) + .expect("failed to publish caller module"); + // test.database_identity is now caller_name — calls/sql default to caller. + + // Invoke call_remote on the caller, passing the receiver's identity and a test message. + test.call("call_remote", &[&receiver_identity, "hello from caller"]) + .expect("call_remote failed"); + + // Verify that the receiver's ping_log has the expected message row. 
+ let result = test + .spacetime(&[ + "sql", + "--server", + &test.server_url, + &receiver_identity, + "SELECT message FROM ping_log", + ]) + .expect("sql query failed"); + + assert!( + result.contains("hello from caller"), + "Expected ping_log to contain 'hello from caller' after cross-DB call, got:\n{result}" + ); +} diff --git a/crates/smoketests/tests/smoketests/mod.rs b/crates/smoketests/tests/smoketests/mod.rs index f5053652dd3..d046c795d88 100644 --- a/crates/smoketests/tests/smoketests/mod.rs +++ b/crates/smoketests/tests/smoketests/mod.rs @@ -4,6 +4,7 @@ mod auto_inc; mod auto_migration; mod call; mod change_host_type; +mod cross_db_reducer; mod cli; mod client_connection_errors; mod confirmed_reads; From 279d86fa79e3e5649442b671737958c78bcb8460 Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 13:01:19 +0530 Subject: [PATCH 12/38] run smoketests --- crates/bindings-sys/src/lib.rs | 12 +- crates/bindings/src/remote_reducer.rs | 12 +- crates/core/src/host/wasm_common.rs | 11 +- .../src/host/wasmtime/wasm_instance_env.rs | 108 +++++++++--------- crates/core/src/replica_context.rs | 3 +- .../tests/smoketests/cross_db_reducer.rs | 10 +- crates/standalone/src/lib.rs | 16 ++- crates/standalone/src/subcommands/start.rs | 48 ++++---- crates/testing/src/modules.rs | 2 + 9 files changed, 122 insertions(+), 100 deletions(-) diff --git a/crates/bindings-sys/src/lib.rs b/crates/bindings-sys/src/lib.rs index c9fb1849fb6..1bad2a8abff 100644 --- a/crates/bindings-sys/src/lib.rs +++ b/crates/bindings-sys/src/lib.rs @@ -1499,10 +1499,14 @@ pub fn call_reducer_on_db( &mut out, ) }; - match Errno::from_code(status) { - None => Ok((status, out)), - Some(errno) if errno == Errno::HTTP_ERROR => Err(out), - Some(errno) => panic!("call_reducer_on_db: unexpected errno {errno}"), + // The raw ABI returns either the HTTP status code (100-599) or HTTP_ERROR errno + // on transport failure. 
Unlike other ABI functions, a non-zero return value here + // does NOT indicate a generic errno — it's the HTTP status code. Only HTTP_ERROR + // specifically signals a transport-level failure. + if status == Errno::HTTP_ERROR.code() { + Err(out) + } else { + Ok((status, out)) } } diff --git a/crates/bindings/src/remote_reducer.rs b/crates/bindings/src/remote_reducer.rs index 80a71d3613f..e0b68924458 100644 --- a/crates/bindings/src/remote_reducer.rs +++ b/crates/bindings/src/remote_reducer.rs @@ -40,9 +40,15 @@ pub fn call_reducer_on_db( let identity_bytes = database_identity.to_byte_array(); match spacetimedb_bindings_sys::call_reducer_on_db(identity_bytes, reducer_name, args) { Ok((status, body_source)) => { - let mut buf = IterBuf::take(); - read_bytes_source_into(body_source, &mut buf); - Ok((status, buf.to_vec())) + // INVALID signals an empty body (host optimization to avoid allocation). + let body = if body_source == spacetimedb_bindings_sys::raw::BytesSource::INVALID { + Vec::new() + } else { + let mut buf = IterBuf::take(); + read_bytes_source_into(body_source, &mut buf); + buf.to_vec() + }; + Ok((status, body)) } Err(err_source) => { let message = read_bytes_source_as::(err_source); diff --git a/crates/core/src/host/wasm_common.rs b/crates/core/src/host/wasm_common.rs index 2c0c5fa57e6..b5bba032d7c 100644 --- a/crates/core/src/host/wasm_common.rs +++ b/crates/core/src/host/wasm_common.rs @@ -433,13 +433,16 @@ macro_rules! abi_funcs { } + $link_sync! { + // Call a reducer on another database while holding (or not holding) a transaction. + // Implemented as a sync host function (using block_in_place) so it can be called + // from within a reducer body where only synchronous host functions are allowed. + "spacetime_10.5"::call_reducer_on_db, + } + $link_async! { "spacetime_10.3"::procedure_sleep_until, "spacetime_10.3"::procedure_http_request, - - // Call a reducer on another database while holding (or not holding) a transaction. 
- // Uses a warmed HTTP/2 connection pool to localhost:80. - "spacetime_10.5"::call_reducer_on_db, } }; } diff --git a/crates/core/src/host/wasmtime/wasm_instance_env.rs b/crates/core/src/host/wasmtime/wasm_instance_env.rs index 868aa8b2ea5..87182c30be2 100644 --- a/crates/core/src/host/wasmtime/wasm_instance_env.rs +++ b/crates/core/src/host/wasmtime/wasm_instance_env.rs @@ -1965,64 +1965,60 @@ impl WasmInstanceEnv { /// # Traps /// /// Traps if any pointer is NULL or its range falls outside of linear memory. - pub fn call_reducer_on_db<'caller>( - caller: Caller<'caller, Self>, - (identity_ptr, reducer_ptr, reducer_len, args_ptr, args_len, out): ( - WasmPtr, - WasmPtr, - u32, - WasmPtr, - u32, - WasmPtr, - ), - ) -> Fut<'caller, RtResult> { - Self::async_with_span(caller, AbiCall::CallReducerOnDb, move |mut caller| async move { - let (mem, env) = Self::mem_env(&mut caller); - - #[allow(clippy::redundant_closure_call)] - let res = (async move || { - // Read the 32-byte BSATN-encoded Identity (little-endian). - let identity_slice = mem.deref_slice(identity_ptr, 32)?; - let identity_bytes: [u8; 32] = identity_slice - .try_into() - .expect("deref_slice(ptr, 32) always yields exactly 32 bytes"); - let database_identity = Identity::from_byte_array(identity_bytes); - - // Read the reducer name as a UTF-8 string. - let reducer_name = mem.deref_str(reducer_ptr, reducer_len)?; - - // Read the BSATN-encoded args as raw bytes. 
- let args_buf = mem.deref_slice(args_ptr, args_len)?; - let args = bytes::Bytes::copy_from_slice(args_buf); - - let result = env - .instance_env - .call_reducer_on_db(database_identity, reducer_name, args) - .await; + pub fn call_reducer_on_db( + caller: Caller<'_, Self>, + identity_ptr: WasmPtr, + reducer_ptr: WasmPtr, + reducer_len: u32, + args_ptr: WasmPtr, + args_len: u32, + out: WasmPtr, + ) -> RtResult { + Self::cvt_custom(caller, AbiCall::CallReducerOnDb, |caller| { + let (mem, env) = Self::mem_env(caller); - match result { - Ok((status, body)) => { - let bytes_source = WasmInstanceEnv::create_bytes_source(env, body)?; - bytes_source.0.write_to(mem, out)?; - Ok(status as u32) - } - Err(NodesError::HttpError(err)) => { - let err_bytes = bsatn::to_vec(&err).with_context(|| { - format!("Failed to BSATN-serialize call_reducer_on_db transport error: {err:?}") - })?; - let bytes_source = WasmInstanceEnv::create_bytes_source(env, err_bytes.into())?; - bytes_source.0.write_to(mem, out)?; - Ok(errno::HTTP_ERROR.get() as u32) - } - Err(e) => Err(WasmError::Db(e)), + // Read the 32-byte BSATN-encoded Identity (little-endian). + let identity_slice = mem.deref_slice(identity_ptr, 32)?; + let identity_bytes: [u8; 32] = identity_slice + .try_into() + .expect("deref_slice(ptr, 32) always yields exactly 32 bytes"); + let database_identity = Identity::from_byte_array(identity_bytes); + + // Read the reducer name as a UTF-8 string. + let reducer_name = mem.deref_str(reducer_ptr, reducer_len)?.to_owned(); + + // Read the BSATN-encoded args as raw bytes. + let args_buf = mem.deref_slice(args_ptr, args_len)?; + let args = bytes::Bytes::copy_from_slice(args_buf); + + // Reducers run inside a tokio LocalSet (single-threaded), so block_in_place + // is unavailable and futures::executor::block_on can't drive tokio I/O. + // Spawn a new OS thread and call Handle::block_on from there, which is + // designed to be called from synchronous (non-async) contexts. 
+ let handle = tokio::runtime::Handle::current(); + let fut = env.instance_env.call_reducer_on_db(database_identity, &reducer_name, args); + let result = std::thread::scope(|s| { + s.spawn(|| handle.block_on(fut)) + .join() + .expect("call_reducer_on_db: worker thread panicked") + }); + + match result { + Ok((status, body)) => { + let bytes_source = WasmInstanceEnv::create_bytes_source(env, body)?; + bytes_source.0.write_to(mem, out)?; + Ok(status as u32) } - })() - .await; - - ( - caller, - res.or_else(|err| Self::convert_wasm_result(AbiCall::CallReducerOnDb, err)), - ) + Err(NodesError::HttpError(err)) => { + let err_bytes = bsatn::to_vec(&err).with_context(|| { + format!("Failed to BSATN-serialize call_reducer_on_db transport error: {err:?}") + })?; + let bytes_source = WasmInstanceEnv::create_bytes_source(env, err_bytes.into())?; + bytes_source.0.write_to(mem, out)?; + Ok(errno::HTTP_ERROR.get() as u32) + } + Err(e) => Err(WasmError::Db(e)), + } }) } } diff --git a/crates/core/src/replica_context.rs b/crates/core/src/replica_context.rs index 99208940d3a..f4e20dcd4d7 100644 --- a/crates/core/src/replica_context.rs +++ b/crates/core/src/replica_context.rs @@ -62,13 +62,12 @@ impl ReplicaContext { /// Build a warmed `reqwest::Client` from `config`. 
pub fn new_call_reducer_client(config: &CallReducerOnDbConfig) -> reqwest::Client { reqwest::Client::builder() - .http2_prior_knowledge() .tcp_keepalive(config.tcp_keepalive) .pool_idle_timeout(config.pool_idle_timeout) .pool_max_idle_per_host(config.pool_max_idle_per_host) .timeout(config.request_timeout) .build() - .expect("failed to build call_reducer_on_db HTTP/2 client") + .expect("failed to build call_reducer_on_db HTTP client") } } diff --git a/crates/smoketests/tests/smoketests/cross_db_reducer.rs b/crates/smoketests/tests/smoketests/cross_db_reducer.rs index 9fcd7f3f563..8bfb88f34b7 100644 --- a/crates/smoketests/tests/smoketests/cross_db_reducer.rs +++ b/crates/smoketests/tests/smoketests/cross_db_reducer.rs @@ -9,7 +9,7 @@ use spacetimedb_smoketests::Smoketest; const MODULE_CODE: &str = r#" use spacetimedb::{log, ReducerContext, Table, Identity}; -#[spacetimedb::table(name = ping_log, public)] +#[spacetimedb::table(accessor = ping_log, public)] pub struct PingLog { #[primary_key] #[auto_inc] @@ -24,13 +24,15 @@ pub fn record_ping(ctx: &ReducerContext, message: String) { ctx.db.ping_log().insert(PingLog { id: 0, message }); } -/// Calls `record_ping(message)` on `target` via the cross-database ABI. +/// Calls `record_ping(message)` on `target_hex` via the cross-database ABI. /// +/// `target_hex` is the hex-encoded identity of the target database. /// Args are BSATN-encoded as a 1-tuple `(message,)` — the same layout the host-side /// `invoke_reducer` expects when decoding a single-`String` reducer. 
#[spacetimedb::reducer] -pub fn call_remote(ctx: &ReducerContext, target: Identity, message: String) { - let args = spacetimedb::bsatn::to_vec(&(message,)).expect("failed to encode args"); +pub fn call_remote(ctx: &ReducerContext, target_hex: String, message: String) { + let target = Identity::from_hex(&target_hex).expect("invalid target identity hex"); + let args = spacetimedb::spacetimedb_lib::bsatn::to_vec(&(message,)).expect("failed to encode args"); match spacetimedb::remote_reducer::call_reducer_on_db(target, "record_ping", &args) { Ok((status, _body)) => { log::info!("call_remote: got HTTP status {}", status); diff --git a/crates/standalone/src/lib.rs b/crates/standalone/src/lib.rs index 3343a9292b1..3b97c6d46fd 100644 --- a/crates/standalone/src/lib.rs +++ b/crates/standalone/src/lib.rs @@ -14,7 +14,9 @@ use spacetimedb::config::{CertificateAuthority, MetadataFile, V8HeapPolicyConfig use spacetimedb::db; use spacetimedb::db::persistence::LocalPersistenceProvider; use spacetimedb::energy::{EnergyBalance, EnergyQuanta, NullEnergyMonitor}; -use spacetimedb::host::{DiskStorage, HostController, MigratePlanResult, UpdateDatabaseResult}; +use spacetimedb::host::{ + reducer_router::LocalReducerRouter, DiskStorage, HostController, MigratePlanResult, UpdateDatabaseResult, +}; use spacetimedb::identity::{AuthCtx, Identity}; use spacetimedb::messages::control_db::{Database, Node, Replica}; use spacetimedb::subscription::row_list_builder_pool::BsatnRowListBuilderPool; @@ -38,11 +40,15 @@ use std::time::Duration; pub use spacetimedb_client_api::routes::subscribe::{BIN_PROTOCOL, TEXT_PROTOCOL}; -#[derive(Clone, Copy)] +#[derive(Clone)] pub struct StandaloneOptions { pub db_config: db::Config, pub websocket: WebSocketOptions, pub v8_heap_policy: V8HeapPolicyConfig, + /// HTTP base URL of this node's API server (e.g. `"http://127.0.0.1:3000"`). 
+ /// Used to configure the `LocalReducerRouter` so that cross-DB reducer calls + /// reach the correct address when the server listens on a dynamic port. + pub local_api_url: String, } pub struct StandaloneEnv { @@ -76,7 +82,7 @@ impl StandaloneEnv { let program_store = Arc::new(DiskStorage::new(data_dir.program_bytes().0).await?); let persistence_provider = Arc::new(LocalPersistenceProvider::new(data_dir.clone())); - let host_controller = HostController::new( + let mut host_controller = HostController::new( data_dir, config.db_config, config.v8_heap_policy, @@ -85,6 +91,7 @@ impl StandaloneEnv { persistence_provider, db_cores, ); + host_controller.call_reducer_router = Arc::new(LocalReducerRouter::new(config.local_api_url)); let client_actor_index = ClientActorIndex::new(); let jwt_keys = certs.get_or_create_keys()?; @@ -651,9 +658,10 @@ mod tests { }, websocket: WebSocketOptions::default(), v8_heap_policy: V8HeapPolicyConfig::default(), + local_api_url: "http://127.0.0.1:3000".to_owned(), }; - let _env = StandaloneEnv::init(config, &ca, data_dir.clone(), JobCores::without_pinned_cores()).await?; + let _env = StandaloneEnv::init(config.clone(), &ca, data_dir.clone(), JobCores::without_pinned_cores()).await?; // Ensure that we have a lock. 
assert!( StandaloneEnv::init(config, &ca, data_dir.clone(), JobCores::without_pinned_cores()) diff --git a/crates/standalone/src/subcommands/start.rs b/crates/standalone/src/subcommands/start.rs index 399b7e9a350..30eb64c5a6c 100644 --- a/crates/standalone/src/subcommands/start.rs +++ b/crates/standalone/src/subcommands/start.rs @@ -177,29 +177,6 @@ pub async fn exec(args: &ArgMatches, db_cores: JobCores) -> anyhow::Result<()> { .or_else(|| cert_dir.map(CertificateAuthority::in_cli_config_dir)) .context("cannot omit --jwt-{pub,priv}-key-path when those options are not specified in config.toml")?; - let data_dir = Arc::new(data_dir.clone()); - let ctx = StandaloneEnv::init( - StandaloneOptions { - db_config, - websocket: config.websocket, - v8_heap_policy: config.common.v8_heap_policy, - }, - &certs, - data_dir, - db_cores, - ) - .await?; - worker_metrics::spawn_jemalloc_stats(listen_addr.clone()); - worker_metrics::spawn_tokio_stats(listen_addr.clone()); - worker_metrics::spawn_page_pool_stats(listen_addr.clone(), ctx.page_pool().clone()); - worker_metrics::spawn_bsatn_rlb_pool_stats(listen_addr.clone(), ctx.bsatn_rlb_pool().clone()); - let mut db_routes = DatabaseRoutes::default(); - db_routes.root_post = db_routes.root_post.layer(DefaultBodyLimit::disable()); - db_routes.db_put = db_routes.db_put.layer(DefaultBodyLimit::disable()); - db_routes.pre_publish = db_routes.pre_publish.layer(DefaultBodyLimit::disable()); - let extra = axum::Router::new().nest("/health", spacetimedb_client_api::routes::health::router()); - let service = router(&ctx, db_routes, IdentityRoutes::default(), extra).with_state(ctx.clone()); - // Check if the requested port is available on both IPv4 and IPv6. // If not, offer to find an available port by incrementing (unless non-interactive). 
let listen_addr = if let Some((host, port_str)) = listen_addr.rsplit_once(':') { @@ -251,6 +228,31 @@ pub async fn exec(args: &ArgMatches, db_cores: JobCores) -> anyhow::Result<()> { socket2::SockRef::from(&tcp).set_nodelay(true)?; log::info!("Starting SpacetimeDB listening on {}", tcp.local_addr()?); + let local_port = tcp.local_addr()?.port(); + let data_dir = Arc::new(data_dir.clone()); + let ctx = StandaloneEnv::init( + StandaloneOptions { + db_config, + websocket: config.websocket, + v8_heap_policy: config.common.v8_heap_policy, + local_api_url: format!("http://127.0.0.1:{local_port}"), + }, + &certs, + data_dir, + db_cores, + ) + .await?; + worker_metrics::spawn_jemalloc_stats(listen_addr.clone()); + worker_metrics::spawn_tokio_stats(listen_addr.clone()); + worker_metrics::spawn_page_pool_stats(listen_addr.clone(), ctx.page_pool().clone()); + worker_metrics::spawn_bsatn_rlb_pool_stats(listen_addr.clone(), ctx.bsatn_rlb_pool().clone()); + let mut db_routes = DatabaseRoutes::default(); + db_routes.root_post = db_routes.root_post.layer(DefaultBodyLimit::disable()); + db_routes.db_put = db_routes.db_put.layer(DefaultBodyLimit::disable()); + db_routes.pre_publish = db_routes.pre_publish.layer(DefaultBodyLimit::disable()); + let extra = axum::Router::new().nest("/health", spacetimedb_client_api::routes::health::router()); + let service = router(&ctx, db_routes, IdentityRoutes::default(), extra).with_state(ctx.clone()); + if let Some(pg_port) = pg_port { let server_addr = listen_addr.split(':').next().unwrap(); let tcp_pg = TcpListener::bind(format!("{server_addr}:{pg_port}")).await.context(format!( diff --git a/crates/testing/src/modules.rs b/crates/testing/src/modules.rs index 318fff8d0cf..0721409c64b 100644 --- a/crates/testing/src/modules.rs +++ b/crates/testing/src/modules.rs @@ -202,6 +202,8 @@ impl CompiledModule { db_config: config, websocket: WebSocketOptions::default(), v8_heap_policy: Default::default(), + // Tests use internal routing; cross-DB HTTP 
calls aren't tested here. + local_api_url: "http://127.0.0.1:3000".to_owned(), }, &certs, paths.data_dir.into(), From cd3e25bfe9d1a5b0ee8e9cc5d72f2e57b91fea91 Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 15:14:47 +0530 Subject: [PATCH 13/38] token --- crates/core/src/host/host_controller.rs | 19 +++++++ crates/core/src/host/instance_env.rs | 16 ++++-- crates/core/src/replica_context.rs | 8 +++ .../tests/smoketests/cross_db_reducer.rs | 55 +++++++++++-------- crates/standalone/src/lib.rs | 2 +- 5 files changed, 72 insertions(+), 28 deletions(-) diff --git a/crates/core/src/host/host_controller.rs b/crates/core/src/host/host_controller.rs index a2a18606614..5d8123ee427 100644 --- a/crates/core/src/host/host_controller.rs +++ b/crates/core/src/host/host_controller.rs @@ -128,6 +128,16 @@ pub struct HostController { /// Set to [`LocalReducerRouter`] by default; replaced with `ClusterReducerRouter` /// in cluster deployments via [`HostController::new`] receiving the router directly. pub call_reducer_router: Arc, + /// A single node-level Bearer token included in all outgoing cross-DB reducer calls. + /// + /// Set once at node startup by the deployment layer (standalone / cluster) so that + /// `anon_auth_middleware` on the target node accepts the request without generating a + /// fresh ephemeral identity on every call. All replicas on this node share the same + /// token — the target only needs proof that the caller is a legitimate node, not which + /// specific database initiated the call. + /// + /// `None` in test/embedded contexts where no JWT signer is configured. 
+ pub call_reducer_auth_token: Option, } pub(crate) struct HostRuntimes { @@ -241,6 +251,7 @@ impl HostController { db_cores, call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), call_reducer_router: Arc::new(LocalReducerRouter::new("http://127.0.0.1:3000")), + call_reducer_auth_token: None, } } @@ -679,6 +690,7 @@ async fn make_replica_ctx( bsatn_rlb_pool: BsatnRowListBuilderPool, call_reducer_client: reqwest::Client, call_reducer_router: Arc, + call_reducer_auth_token: Option, ) -> anyhow::Result { let logger = match module_logs { Some(path) => asyncify(move || Arc::new(DatabaseLogger::open_today(path))).await, @@ -713,6 +725,7 @@ async fn make_replica_ctx( subscriptions, call_reducer_client, call_reducer_router, + call_reducer_auth_token, }) } @@ -790,6 +803,7 @@ struct ModuleLauncher { bsatn_rlb_pool: BsatnRowListBuilderPool, call_reducer_client: reqwest::Client, call_reducer_router: Arc, + call_reducer_auth_token: Option, } impl ModuleLauncher { @@ -811,6 +825,7 @@ impl ModuleLauncher { self.bsatn_rlb_pool, self.call_reducer_client, self.call_reducer_router, + self.call_reducer_auth_token, ) .await .map(Arc::new)?; @@ -1014,6 +1029,7 @@ impl Host { bsatn_rlb_pool: bsatn_rlb_pool.clone(), call_reducer_client: host_controller.call_reducer_client.clone(), call_reducer_router: host_controller.call_reducer_router.clone(), + call_reducer_auth_token: host_controller.call_reducer_auth_token.clone(), } .launch_module() .await? 
@@ -1045,6 +1061,7 @@ impl Host { bsatn_rlb_pool: bsatn_rlb_pool.clone(), call_reducer_client: host_controller.call_reducer_client.clone(), call_reducer_router: host_controller.call_reducer_router.clone(), + call_reducer_auth_token: host_controller.call_reducer_auth_token.clone(), } .launch_module() .await; @@ -1070,6 +1087,7 @@ impl Host { bsatn_rlb_pool: bsatn_rlb_pool.clone(), call_reducer_client: host_controller.call_reducer_client.clone(), call_reducer_router: host_controller.call_reducer_router.clone(), + call_reducer_auth_token: host_controller.call_reducer_auth_token.clone(), } .launch_module() .await; @@ -1180,6 +1198,7 @@ impl Host { // Transient validation-only module; build its own client and router with defaults. call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), call_reducer_router: Arc::new(LocalReducerRouter::new("http://127.0.0.1:3000")), + call_reducer_auth_token: None, } .launch_module() .await diff --git a/crates/core/src/host/instance_env.rs b/crates/core/src/host/instance_env.rs index f778621e470..2684e60a85a 100644 --- a/crates/core/src/host/instance_env.rs +++ b/crates/core/src/host/instance_env.rs @@ -998,6 +998,10 @@ impl InstanceEnv { let client = self.replica_ctx.call_reducer_client.clone(); let router = self.replica_ctx.call_reducer_router.clone(); let reducer_name = reducer_name.to_owned(); + // Node-level auth token: a single token minted at startup and shared by all replicas + // on this node. Passed as a Bearer token so `anon_auth_middleware` on the target node + // accepts the request without generating a fresh ephemeral identity per call. 
+ let auth_token = self.replica_ctx.call_reducer_auth_token.clone(); async move { let base_url = router @@ -1010,13 +1014,14 @@ impl InstanceEnv { database_identity.to_hex(), reducer_name, ); - let response = client + let mut req = client .post(&url) .header(http::header::CONTENT_TYPE, "application/octet-stream") - .body(args) - .send() - .await - .map_err(|e| NodesError::HttpError(e.to_string()))?; + .body(args); + if let Some(token) = auth_token { + req = req.header(http::header::AUTHORIZATION, format!("Bearer {token}")); + } + let response = req.send().await.map_err(|e| NodesError::HttpError(e.to_string()))?; let status = response.status().as_u16(); let body = response @@ -1403,6 +1408,7 @@ mod test { subscriptions: subs, call_reducer_client: ReplicaContext::new_call_reducer_client(&CallReducerOnDbConfig::default()), call_reducer_router: Arc::new(LocalReducerRouter::new("http://127.0.0.1:3000")), + call_reducer_auth_token: None, }, runtime, )) diff --git a/crates/core/src/replica_context.rs b/crates/core/src/replica_context.rs index f4e20dcd4d7..2ecec56273e 100644 --- a/crates/core/src/replica_context.rs +++ b/crates/core/src/replica_context.rs @@ -56,6 +56,14 @@ pub struct ReplicaContext { /// - Standalone: always returns the local node URL ([`crate::host::reducer_router::LocalReducerRouter`]). /// - Cluster: queries the control DB to find the leader replica's node. pub call_reducer_router: Arc, + /// Pre-signed `Authorization: Bearer ` value for outgoing cross-DB reducer calls. + /// + /// Set at replica launch time by the deployment layer (standalone / cluster) using the + /// local JWT signing key. The token identifies this database as the caller, so the target + /// reducer sees a stable, verifiable identity instead of an anonymous ephemeral one. + /// + /// `None` in contexts where no JWT signer is configured (e.g. unit tests). 
+ pub call_reducer_auth_token: Option, } impl ReplicaContext { diff --git a/crates/smoketests/tests/smoketests/cross_db_reducer.rs b/crates/smoketests/tests/smoketests/cross_db_reducer.rs index 8bfb88f34b7..f4d31364333 100644 --- a/crates/smoketests/tests/smoketests/cross_db_reducer.rs +++ b/crates/smoketests/tests/smoketests/cross_db_reducer.rs @@ -2,12 +2,19 @@ use spacetimedb_smoketests::Smoketest; /// Module code used for both the "receiver" and "caller" databases. /// -/// - `record_ping(message)` is called by the caller via `call_reducer_on_db` and stores the -/// message in `ping_log`. -/// - `call_remote(target, message)` is the entry point: it BSATN-encodes `message` and invokes +/// - `record_ping(payload)` is called by the caller via `call_reducer_on_db` and stores the +/// payload fields in `ping_log`. +/// - `call_remote(target, payload)` is the entry point: it BSATN-encodes `payload` and invokes /// `record_ping` on `target` over the cross-DB ABI. const MODULE_CODE: &str = r#" -use spacetimedb::{log, ReducerContext, Table, Identity}; +use spacetimedb::{log, ReducerContext, Table, Identity, SpacetimeType}; + +/// A structured ping payload — used to exercise BSATN encoding of a multi-field struct. +#[derive(SpacetimeType)] +pub struct PingPayload { + pub message: String, + pub priority: u32, +} #[spacetimedb::table(accessor = ping_log, public)] pub struct PingLog { @@ -15,24 +22,25 @@ pub struct PingLog { #[auto_inc] id: u64, message: String, + priority: u32, } -/// Writes one row to `ping_log` with the given message. Called via the cross-DB ABI. +/// Writes one row to `ping_log` from the payload. Called via the cross-DB ABI. 
#[spacetimedb::reducer] -pub fn record_ping(ctx: &ReducerContext, message: String) { - log::info!("record_ping: got message: {}", message); - ctx.db.ping_log().insert(PingLog { id: 0, message }); +pub fn record_ping(ctx: &ReducerContext, payload: PingPayload) { + log::info!("record_ping: got message={} priority={}", payload.message, payload.priority); + ctx.db.ping_log().insert(PingLog { id: 0, message: payload.message, priority: payload.priority }); } -/// Calls `record_ping(message)` on `target_hex` via the cross-database ABI. +/// Calls `record_ping(payload)` on `target_hex` via the cross-database ABI. /// /// `target_hex` is the hex-encoded identity of the target database. -/// Args are BSATN-encoded as a 1-tuple `(message,)` — the same layout the host-side -/// `invoke_reducer` expects when decoding a single-`String` reducer. +/// Args are BSATN-encoded as a 1-tuple `(payload,)`. #[spacetimedb::reducer] -pub fn call_remote(ctx: &ReducerContext, target_hex: String, message: String) { +pub fn call_remote(ctx: &ReducerContext, target_hex: String, message: String, priority: u32) { let target = Identity::from_hex(&target_hex).expect("invalid target identity hex"); - let args = spacetimedb::spacetimedb_lib::bsatn::to_vec(&(message,)).expect("failed to encode args"); + let payload = PingPayload { message, priority }; + let args = spacetimedb::spacetimedb_lib::bsatn::to_vec(&(payload,)).expect("failed to encode args"); match spacetimedb::remote_reducer::call_reducer_on_db(target, "record_ping", &args) { Ok((status, _body)) => { log::info!("call_remote: got HTTP status {}", status); @@ -49,7 +57,9 @@ pub fn call_remote(ctx: &ReducerContext, target_hex: String, message: String) { /// /// Publishes the same module twice on one server, then calls `call_remote` on the /// "caller" database with the "receiver" database's identity as an argument. -/// Verifies that `receiver` has a new row in `ping_log` written by the cross-DB call. 
+/// Passes a structured `PingPayload` (message + priority) to exercise multi-field +/// BSATN encoding over the cross-DB boundary. +/// Verifies that `receiver` has the expected row in `ping_log`. #[test] fn test_cross_db_reducer_call() { let pid = std::process::id(); @@ -57,10 +67,7 @@ fn test_cross_db_reducer_call() { let caller_name = format!("cross-db-caller-{pid}"); // Build one server with the shared module code. - let mut test = Smoketest::builder() - .module_code(MODULE_CODE) - .autopublish(false) - .build(); + let mut test = Smoketest::builder().module_code(MODULE_CODE).autopublish(false).build(); // Publish the receiver database first. test.publish_module_named(&receiver_name, false) @@ -75,18 +82,18 @@ fn test_cross_db_reducer_call() { .expect("failed to publish caller module"); // test.database_identity is now caller_name — calls/sql default to caller. - // Invoke call_remote on the caller, passing the receiver's identity and a test message. - test.call("call_remote", &[&receiver_identity, "hello from caller"]) + // Invoke call_remote on the caller, passing the receiver's identity, message, and priority. + test.call("call_remote", &[&receiver_identity, "hello from caller", "42"]) .expect("call_remote failed"); - // Verify that the receiver's ping_log has the expected message row. + // Verify that the receiver's ping_log has the expected row. 
let result = test .spacetime(&[ "sql", "--server", &test.server_url, &receiver_identity, - "SELECT message FROM ping_log", + "SELECT message, priority FROM ping_log", ]) .expect("sql query failed"); @@ -94,4 +101,8 @@ fn test_cross_db_reducer_call() { result.contains("hello from caller"), "Expected ping_log to contain 'hello from caller' after cross-DB call, got:\n{result}" ); + assert!( + result.contains("42"), + "Expected ping_log to contain priority 42 after cross-DB call, got:\n{result}" + ); } diff --git a/crates/standalone/src/lib.rs b/crates/standalone/src/lib.rs index 3b97c6d46fd..6772703c379 100644 --- a/crates/standalone/src/lib.rs +++ b/crates/standalone/src/lib.rs @@ -22,7 +22,7 @@ use spacetimedb::messages::control_db::{Database, Node, Replica}; use spacetimedb::subscription::row_list_builder_pool::BsatnRowListBuilderPool; use spacetimedb::util::jobs::JobCores; use spacetimedb::worker_metrics::WORKER_METRICS; -use spacetimedb_client_api::auth::{self, LOCALHOST}; +use spacetimedb_client_api::auth::{self, JwtAuthProvider, LOCALHOST}; use spacetimedb_client_api::routes::subscribe::{HasWebSocketOptions, WebSocketOptions}; use spacetimedb_client_api::{ControlStateReadAccess, DatabaseResetDef, Host, NodeDelegate}; use spacetimedb_client_api_messages::name::{ From 850ca801b3f025325b7eaa6664e2d5ef42c20778 Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 17:23:52 +0530 Subject: [PATCH 14/38] fmt --- crates/bindings/src/remote_reducer.rs | 5 ++++- crates/core/src/host/host_controller.rs | 3 ++- crates/core/src/host/reducer_router.rs | 4 +++- crates/core/src/host/wasmtime/wasm_instance_env.rs | 4 +++- crates/smoketests/tests/smoketests/mod.rs | 2 +- crates/standalone/src/lib.rs | 2 +- 6 files changed, 14 insertions(+), 6 deletions(-) diff --git a/crates/bindings/src/remote_reducer.rs b/crates/bindings/src/remote_reducer.rs index e0b68924458..e004f495aaa 100644 --- a/crates/bindings/src/remote_reducer.rs +++ 
b/crates/bindings/src/remote_reducer.rs @@ -22,7 +22,10 @@ //! } //! ``` -use crate::{rt::{read_bytes_source_as, read_bytes_source_into}, IterBuf, Identity}; +use crate::{ + rt::{read_bytes_source_as, read_bytes_source_into}, + Identity, IterBuf, +}; /// Call a reducer on a remote database. /// diff --git a/crates/core/src/host/host_controller.rs b/crates/core/src/host/host_controller.rs index 5d8123ee427..dd774111b86 100644 --- a/crates/core/src/host/host_controller.rs +++ b/crates/core/src/host/host_controller.rs @@ -9,11 +9,11 @@ use crate::db::persistence::PersistenceProvider; use crate::db::relational_db::{self, spawn_view_cleanup_loop, DiskSizeFn, RelationalDB, Txdata}; use crate::db::{self, spawn_tx_metrics_recorder}; use crate::energy::{EnergyMonitor, EnergyQuanta, NullEnergyMonitor}; +use crate::host::reducer_router::{LocalReducerRouter, ReducerCallRouter}; use crate::host::v8::V8Runtime; use crate::host::ProcedureCallError; use crate::messages::control_db::{Database, HostType}; use crate::module_host_context::ModuleCreationContext; -use crate::host::reducer_router::{LocalReducerRouter, ReducerCallRouter}; use crate::replica_context::{CallReducerOnDbConfig, ReplicaContext}; use crate::subscription::module_subscription_actor::ModuleSubscriptions; use crate::subscription::module_subscription_manager::{spawn_send_worker, SubscriptionManager, TransactionOffset}; @@ -682,6 +682,7 @@ fn stored_program_hash(db: &RelationalDB) -> anyhow::Result> { Ok(meta.map(|meta| meta.program_hash)) } +#[allow(clippy::too_many_arguments)] async fn make_replica_ctx( module_logs: Option, database: Database, diff --git a/crates/core/src/host/reducer_router.rs b/crates/core/src/host/reducer_router.rs index 93891ecc6e6..dcbf20c51c8 100644 --- a/crates/core/src/host/reducer_router.rs +++ b/crates/core/src/host/reducer_router.rs @@ -49,7 +49,9 @@ pub struct LocalReducerRouter { impl LocalReducerRouter { pub fn new(base_url: impl Into) -> Self { - Self { base_url: base_url.into() } + 
Self { + base_url: base_url.into(), + } } } diff --git a/crates/core/src/host/wasmtime/wasm_instance_env.rs b/crates/core/src/host/wasmtime/wasm_instance_env.rs index 87182c30be2..7ed23331d7e 100644 --- a/crates/core/src/host/wasmtime/wasm_instance_env.rs +++ b/crates/core/src/host/wasmtime/wasm_instance_env.rs @@ -1996,7 +1996,9 @@ impl WasmInstanceEnv { // Spawn a new OS thread and call Handle::block_on from there, which is // designed to be called from synchronous (non-async) contexts. let handle = tokio::runtime::Handle::current(); - let fut = env.instance_env.call_reducer_on_db(database_identity, &reducer_name, args); + let fut = env + .instance_env + .call_reducer_on_db(database_identity, &reducer_name, args); let result = std::thread::scope(|s| { s.spawn(|| handle.block_on(fut)) .join() diff --git a/crates/smoketests/tests/smoketests/mod.rs b/crates/smoketests/tests/smoketests/mod.rs index d046c795d88..18ad7b51199 100644 --- a/crates/smoketests/tests/smoketests/mod.rs +++ b/crates/smoketests/tests/smoketests/mod.rs @@ -4,12 +4,12 @@ mod auto_inc; mod auto_migration; mod call; mod change_host_type; -mod cross_db_reducer; mod cli; mod client_connection_errors; mod confirmed_reads; mod connect_disconnect_from_cli; mod create_project; +mod cross_db_reducer; mod csharp_module; mod default_module_clippy; mod delete_database; diff --git a/crates/standalone/src/lib.rs b/crates/standalone/src/lib.rs index 6772703c379..3b97c6d46fd 100644 --- a/crates/standalone/src/lib.rs +++ b/crates/standalone/src/lib.rs @@ -22,7 +22,7 @@ use spacetimedb::messages::control_db::{Database, Node, Replica}; use spacetimedb::subscription::row_list_builder_pool::BsatnRowListBuilderPool; use spacetimedb::util::jobs::JobCores; use spacetimedb::worker_metrics::WORKER_METRICS; -use spacetimedb_client_api::auth::{self, JwtAuthProvider, LOCALHOST}; +use spacetimedb_client_api::auth::{self, LOCALHOST}; use spacetimedb_client_api::routes::subscribe::{HasWebSocketOptions, WebSocketOptions}; use 
spacetimedb_client_api::{ControlStateReadAccess, DatabaseResetDef, Host, NodeDelegate}; use spacetimedb_client_api_messages::name::{ From 9360096e638518610473e59e85f21e0759271402 Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 17:43:10 +0530 Subject: [PATCH 15/38] better error handling --- crates/bindings/src/remote_reducer.rs | 66 ++++++++++++++----- .../tests/smoketests/cross_db_reducer.rs | 10 +-- 2 files changed, 54 insertions(+), 22 deletions(-) diff --git a/crates/bindings/src/remote_reducer.rs b/crates/bindings/src/remote_reducer.rs index e004f495aaa..6bfdf75414e 100644 --- a/crates/bindings/src/remote_reducer.rs +++ b/crates/bindings/src/remote_reducer.rs @@ -1,10 +1,9 @@ -//! Naive binding for calling reducers on remote SpacetimeDB databases. +//! Binding for calling reducers on remote SpacetimeDB databases. //! //! Call a reducer on another database using [`call_reducer_on_db`]. //! -//! The args must be BSATN-encoded. The response body is raw bytes returned by -//! the remote database's HTTP handler. An HTTP status >= 400 does not cause an -//! `Err` return; only a transport failure (connection refused, timeout, …) does. +//! The args must be BSATN-encoded. Returns `Ok(())` when the remote reducer +//! ran and succeeded, or one of the [`RemoteCallError`] variants on failure. //! //! # Example //! @@ -16,46 +15,79 @@ //! // Empty BSATN args for a zero-argument reducer. //! let args = spacetimedb::bsatn::to_vec(&()).unwrap(); //! match remote_reducer::call_reducer_on_db(target, "my_reducer", &args) { -//! Ok((status, body)) => log::info!("status={status} body={body:?}"), -//! Err(msg) => log::error!("transport error: {msg}"), +//! Ok(()) => log::info!("remote reducer succeeded"), +//! Err(remote_reducer::RemoteCallError::Failed(msg)) => log::error!("reducer failed: {msg}"), +//! Err(remote_reducer::RemoteCallError::NotFound(msg)) => log::error!("not found: {msg}"), +//! 
Err(remote_reducer::RemoteCallError::Unreachable(msg)) => log::error!("unreachable: {msg}"), //! } //! } //! ``` use crate::{ - rt::{read_bytes_source_as, read_bytes_source_into}, + rt::read_bytes_source_into, Identity, IterBuf, }; +/// Error returned by [`call_reducer_on_db`]. +#[derive(Debug)] +pub enum RemoteCallError { + /// The remote reducer ran but returned an error. Contains the error message from the server. + Failed(String), + /// The target database or reducer does not exist (HTTP 404). + NotFound(String), + /// The call could not be delivered (connection refused, timeout, network error, etc.). + Unreachable(String), +} + +impl core::fmt::Display for RemoteCallError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + RemoteCallError::Failed(msg) => write!(f, "remote reducer failed: {msg}"), + RemoteCallError::NotFound(msg) => write!(f, "remote database or reducer not found: {msg}"), + RemoteCallError::Unreachable(msg) => write!(f, "remote database unreachable: {msg}"), + } + } +} + /// Call a reducer on a remote database. /// /// - `database_identity`: the target database. /// - `reducer_name`: the name of the reducer to invoke (must be valid UTF-8). /// - `args`: BSATN-encoded reducer arguments. /// -/// Returns `Ok((status, body))` on any transport success (including HTTP errors like 400/500). -/// Returns `Err(message)` on transport failure (connection refused, timeout, …). +/// Returns `Ok(())` when the remote reducer ran and succeeded. +/// Returns `Err(RemoteCallError::Failed(msg))` when the reducer ran but returned an error. +/// Returns `Err(RemoteCallError::NotFound(msg))` when the database or reducer does not exist. +/// Returns `Err(RemoteCallError::Unreachable(msg))` on transport failure (connection refused, timeout, …). 
pub fn call_reducer_on_db( database_identity: Identity, reducer_name: &str, args: &[u8], -) -> Result<(u16, Vec), String> { +) -> Result<(), RemoteCallError> { let identity_bytes = database_identity.to_byte_array(); match spacetimedb_bindings_sys::call_reducer_on_db(identity_bytes, reducer_name, args) { Ok((status, body_source)) => { - // INVALID signals an empty body (host optimization to avoid allocation). - let body = if body_source == spacetimedb_bindings_sys::raw::BytesSource::INVALID { - Vec::new() + if status < 300 { + return Ok(()); + } + // Decode the response body as the error message. + let msg = if body_source == spacetimedb_bindings_sys::raw::BytesSource::INVALID { + String::new() } else { let mut buf = IterBuf::take(); read_bytes_source_into(body_source, &mut buf); - buf.to_vec() + String::from_utf8_lossy(&buf).into_owned() }; - Ok((status, body)) + if status == 404 { + Err(RemoteCallError::NotFound(msg)) + } else { + Err(RemoteCallError::Failed(msg)) + } } Err(err_source) => { - let message = read_bytes_source_as::(err_source); - Err(message) + use crate::rt::read_bytes_source_as; + let msg = read_bytes_source_as::(err_source); + Err(RemoteCallError::Unreachable(msg)) } } } diff --git a/crates/smoketests/tests/smoketests/cross_db_reducer.rs b/crates/smoketests/tests/smoketests/cross_db_reducer.rs index f4d31364333..22d4a94b9f1 100644 --- a/crates/smoketests/tests/smoketests/cross_db_reducer.rs +++ b/crates/smoketests/tests/smoketests/cross_db_reducer.rs @@ -42,12 +42,12 @@ pub fn call_remote(ctx: &ReducerContext, target_hex: String, message: String, pr let payload = PingPayload { message, priority }; let args = spacetimedb::spacetimedb_lib::bsatn::to_vec(&(payload,)).expect("failed to encode args"); match spacetimedb::remote_reducer::call_reducer_on_db(target, "record_ping", &args) { - Ok((status, _body)) => { - log::info!("call_remote: got HTTP status {}", status); + Ok(()) => { + log::info!("call_remote: remote reducer succeeded"); } - Err(err) => 
{ - log::error!("call_remote: transport failure: {}", err); - panic!("call_reducer_on_db transport failure: {err}"); + Err(e) => { + log::error!("call_remote: {}", e); + panic!("call_reducer_on_db error: {e}"); } } } From faccc62ae9f0ded3bda1bf47a6e65a9b2b70e7ab Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 18:16:34 +0530 Subject: [PATCH 16/38] fmt --- crates/bindings/src/remote_reducer.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/crates/bindings/src/remote_reducer.rs b/crates/bindings/src/remote_reducer.rs index 6bfdf75414e..bded8bc5ae7 100644 --- a/crates/bindings/src/remote_reducer.rs +++ b/crates/bindings/src/remote_reducer.rs @@ -23,10 +23,7 @@ //! } //! ``` -use crate::{ - rt::read_bytes_source_into, - Identity, IterBuf, -}; +use crate::{rt::read_bytes_source_into, Identity, IterBuf}; /// Error returned by [`call_reducer_on_db`]. #[derive(Debug)] @@ -59,11 +56,7 @@ impl core::fmt::Display for RemoteCallError { /// Returns `Err(RemoteCallError::Failed(msg))` when the reducer ran but returned an error. /// Returns `Err(RemoteCallError::NotFound(msg))` when the database or reducer does not exist. /// Returns `Err(RemoteCallError::Unreachable(msg))` on transport failure (connection refused, timeout, …). 
-pub fn call_reducer_on_db( - database_identity: Identity, - reducer_name: &str, - args: &[u8], -) -> Result<(), RemoteCallError> { +pub fn call_reducer_on_db(database_identity: Identity, reducer_name: &str, args: &[u8]) -> Result<(), RemoteCallError> { let identity_bytes = database_identity.to_byte_array(); match spacetimedb_bindings_sys::call_reducer_on_db(identity_bytes, reducer_name, args) { Ok((status, body_source)) => { From 22eeae0e5a1d047586af3385f0ba3c5bf6eda07d Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 19:04:11 +0530 Subject: [PATCH 17/38] docs --- crates/bindings-sys/src/lib.rs | 4 ++-- crates/core/src/host/wasmtime/wasm_instance_env.rs | 2 +- crates/core/src/replica_context.rs | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/bindings-sys/src/lib.rs b/crates/bindings-sys/src/lib.rs index 1bad2a8abff..8854ae393b6 100644 --- a/crates/bindings-sys/src/lib.rs +++ b/crates/bindings-sys/src/lib.rs @@ -867,7 +867,7 @@ pub mod raw { #[link(wasm_import_module = "spacetime_10.5")] unsafe extern "C" { - /// Call a reducer on another SpacetimeDB database via the local reverse proxy at `localhost:80`. + /// Call a reducer on another SpacetimeDB database. /// /// - `identity_ptr` must point to exactly 32 bytes — the BSATN (little-endian) encoding of /// the target database `Identity`. @@ -879,7 +879,7 @@ pub mod raw { /// - Writes a [`BytesSource`] containing the response body bytes to `*out`. /// /// On transport failure (connection refused, timeout, etc.): - /// - Returns [`errno::HTTP_ERROR`] (21). + /// - Returns `errno::HTTP_ERROR` (21). /// - Writes a [`BytesSource`] containing a BSATN-encoded error [`String`] to `*out`. 
/// /// Unlike `procedure_http_request`, this syscall may be called while a transaction diff --git a/crates/core/src/host/wasmtime/wasm_instance_env.rs b/crates/core/src/host/wasmtime/wasm_instance_env.rs index 7ed23331d7e..f24b8a4f3dc 100644 --- a/crates/core/src/host/wasmtime/wasm_instance_env.rs +++ b/crates/core/src/host/wasmtime/wasm_instance_env.rs @@ -1944,7 +1944,7 @@ impl WasmInstanceEnv { }) } - /// Call a reducer on another SpacetimeDB database via the local reverse proxy at `localhost:80`. + /// Call a reducer on another SpacetimeDB database. /// /// - `identity_ptr` must point to exactly 32 bytes — the BSATN (little-endian) encoding of the /// target [`Identity`]. diff --git a/crates/core/src/replica_context.rs b/crates/core/src/replica_context.rs index 2ecec56273e..8c9f8804f24 100644 --- a/crates/core/src/replica_context.rs +++ b/crates/core/src/replica_context.rs @@ -56,13 +56,13 @@ pub struct ReplicaContext { /// - Standalone: always returns the local node URL ([`crate::host::reducer_router::LocalReducerRouter`]). /// - Cluster: queries the control DB to find the leader replica's node. pub call_reducer_router: Arc, - /// Pre-signed `Authorization: Bearer ` value for outgoing cross-DB reducer calls. + /// `Authorization: Bearer ` value for outgoing cross-DB reducer calls. /// - /// Set at replica launch time by the deployment layer (standalone / cluster) using the - /// local JWT signing key. The token identifies this database as the caller, so the target - /// reducer sees a stable, verifiable identity instead of an anonymous ephemeral one. + /// A single node-level token set once at startup and shared by all replicas on this node. + /// Passed as a Bearer token so `anon_auth_middleware` on the target node accepts the request + /// without generating a fresh ephemeral identity per call. /// - /// `None` in contexts where no JWT signer is configured (e.g. unit tests). + /// `None` in contexts where no auth token is configured (e.g. unit tests). 
pub call_reducer_auth_token: Option, } From a58a4e30dd7fbfa55864541d440e353005d6183b Mon Sep 17 00:00:00 2001 From: Shubham Mishra Date: Sat, 28 Mar 2026 19:52:30 +0530 Subject: [PATCH 18/38] metrics --- crates/core/src/host/instance_env.rs | 35 ++++++++++++++++++--------- crates/core/src/worker_metrics/mod.rs | 10 ++++++++ 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/crates/core/src/host/instance_env.rs b/crates/core/src/host/instance_env.rs index 2684e60a85a..1fdc651414e 100644 --- a/crates/core/src/host/instance_env.rs +++ b/crates/core/src/host/instance_env.rs @@ -8,6 +8,7 @@ use crate::replica_context::ReplicaContext; use crate::subscription::module_subscription_actor::{commit_and_broadcast_event, ModuleSubscriptions}; use crate::subscription::module_subscription_manager::{from_tx_offset, TransactionOffset}; use crate::util::prometheus_handle::IntGaugeExt; +use crate::worker_metrics::WORKER_METRICS; use chrono::{DateTime, Utc}; use core::mem; use futures::TryFutureExt; @@ -978,13 +979,13 @@ impl InstanceEnv { }) } - /// Call a reducer on a remote database via the local reverse proxy (`localhost:80`). + /// Call a reducer on a remote database. /// /// Unlike [`Self::http_request`], this is explicitly allowed while a transaction is open — /// the caller is responsible for understanding the consistency implications. /// /// Uses [`ReplicaContext::call_reducer_router`] to resolve the leader node for - /// `database_identity`, then sends the request via the warmed HTTP/2 client in + /// `database_identity`, then sends the request via the warmed HTTP client in /// [`ReplicaContext::call_reducer_client`]. /// /// Returns `(http_status, response_body)` on transport success, @@ -1002,8 +1003,11 @@ impl InstanceEnv { // on this node. Passed as a Bearer token so `anon_auth_middleware` on the target node // accepts the request without generating a fresh ephemeral identity per call. 
let auth_token = self.replica_ctx.call_reducer_auth_token.clone(); + let caller_identity = self.replica_ctx.database.database_identity; async move { + let start = Instant::now(); + let base_url = router .resolve_base_url(database_identity) .await @@ -1021,15 +1025,24 @@ impl InstanceEnv { if let Some(token) = auth_token { req = req.header(http::header::AUTHORIZATION, format!("Bearer {token}")); } - let response = req.send().await.map_err(|e| NodesError::HttpError(e.to_string()))?; - - let status = response.status().as_u16(); - let body = response - .bytes() - .await - .map_err(|e| NodesError::HttpError(e.to_string()))?; - - Ok((status, body)) + let result = async { + let response = req.send().await.map_err(|e| NodesError::HttpError(e.to_string()))?; + let status = response.status().as_u16(); + let body = response.bytes().await.map_err(|e| NodesError::HttpError(e.to_string()))?; + Ok((status, body)) + } + .await; + + WORKER_METRICS + .cross_db_reducer_calls_total + .with_label_values(&caller_identity) + .inc(); + WORKER_METRICS + .cross_db_reducer_duration_seconds + .with_label_values(&caller_identity) + .observe(start.elapsed().as_secs_f64()); + + result } } } diff --git a/crates/core/src/worker_metrics/mod.rs b/crates/core/src/worker_metrics/mod.rs index 6421a95d7cb..5b84e230045 100644 --- a/crates/core/src/worker_metrics/mod.rs +++ b/crates/core/src/worker_metrics/mod.rs @@ -43,6 +43,16 @@ metrics_group!( #[labels(database_identity: Identity, protocol: str)] pub websocket_request_msg_size: HistogramVec, + #[name = spacetime_cross_db_reducer_calls_total] + #[help = "Total number of cross-database reducer calls made by this database."] + #[labels(caller_identity: Identity)] + pub cross_db_reducer_calls_total: IntCounterVec, + + #[name = spacetime_cross_db_reducer_duration_seconds] + #[help = "Duration of cross-database reducer calls in seconds."] + #[labels(caller_identity: Identity)] + pub cross_db_reducer_duration_seconds: HistogramVec, + #[name = 
jemalloc_active_bytes] #[help = "Number of bytes in jemallocs heap"] #[labels(node_id: str)] From 33e8ec01024d0cc963ea3eafe265140a50acb723 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 11:51:17 -0400 Subject: [PATCH 19/38] Extend timeout on remote calls --- modules/tpcc/src/remote.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index 7bb12c82c39..74f089d6904 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -1,5 +1,10 @@ +use std::time::Duration; + use http::Request; -use spacetimedb::{reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, Table, TxContext}; +use spacetimedb::{ + http::Timeout, reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, Table, TimeDuration, + TxContext, +}; use spacetimedb_sats::bsatn; use crate::WarehouseId; @@ -60,6 +65,8 @@ pub fn call_remote_function( )) .method("POST") .header("Content-Type", "application/octet-stream") + // This absurdly long timeout will be clamped by the host to 3 minutes. + .extension(Timeout::from(TimeDuration::from_duration(Duration::from_hours(1)))) // TODO(auth): include a token. .body(bsatn::to_vec(&arguments).map_err(|e| format!("Failed to BSATN-serialize arguments: {e}"))?) 
.map_err(|e| format!("Error constructing `Request`: {e}"))?; From 311c1f2c12038cceca05823888d973f275d17c74 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 12:12:15 -0400 Subject: [PATCH 20/38] Add a whole bunch of logging on start and end of operations --- modules/tpcc/src/lib.rs | 63 ++++++++-- modules/tpcc/src/new_order.rs | 203 +++++++++++++++++--------------- modules/tpcc/src/payment.rs | 170 +++++++++++++------------- modules/tpcc/src/remote.rs | 20 +++- tools/tpcc-runner/src/client.rs | 19 +++ 5 files changed, 291 insertions(+), 184 deletions(-) diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index b218fcd8aa6..eb146e748f1 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -430,7 +430,15 @@ pub fn order_status( d_id: u8, customer: CustomerSelector, ) -> Result { - ctx.try_with_tx(|tx| order_status_tx(tx, w_id, d_id, &customer)) + let start_time = ctx.timestamp; + log::debug!("Starting `order_status` at {start_time:?}"); + let res = ctx.try_with_tx(|tx| order_status_tx(tx, w_id, d_id, &customer)); + + match &res { + Ok(_) => log::debug!("Succesfully finished `order_status` at {start_time:?}"), + Err(e) => log::error!("Failed `order_status` at {start_time:?}: {e}"), + } + res } #[procedure] @@ -440,7 +448,15 @@ pub fn stock_level( d_id: u8, threshold: i32, ) -> Result { - ctx.try_with_tx(|tx| stock_level_tx(tx, w_id, d_id, threshold)) + let start_time = ctx.timestamp; + log::debug!("Starting `stock_level` at {start_time:?}"); + let res = ctx.try_with_tx(|tx| stock_level_tx(tx, w_id, d_id, threshold)); + + match &res { + Ok(_) => log::debug!("Succesfully finished `stock_level` at {start_time:?}"), + Err(e) => log::error!("Failed `stock_level` at {start_time:?}: {e}"), + } + res } #[procedure] @@ -454,7 +470,8 @@ pub fn queue_delivery( carrier_id: u8, ) -> Result { let queued_at = ctx.timestamp; - ctx.try_with_tx(|tx| { + log::debug!("Starting `queue_delivery` at {queued_at:?}"); + let res = 
ctx.try_with_tx(|tx| { ensure_warehouse_exists(tx, w_id)?; ensure!((1..=10).contains(&carrier_id), "carrier_id must be in the range 1..=10"); @@ -479,12 +496,20 @@ pub fn queue_delivery( warehouse_id: w_id, carrier_id, }) - }) + }); + + match &res { + Ok(_) => log::debug!("Succesfully finished `queue_delivery` at {queued_at:?}"), + Err(e) => log::error!("Failed `queue_delivery` at {queued_at:?}: {e}"), + } + res } #[procedure] pub fn delivery_progress(ctx: &mut ProcedureContext, run_id: String) -> Result { - ctx.try_with_tx(|tx| { + let start_time = ctx.timestamp; + log::debug!("Starting `delivery_progress` at {start_time:?}"); + let res = ctx.try_with_tx(|tx| { let pending_jobs = tx.db.delivery_job().by_run_id().filter(&run_id).count() as u64; let completed_jobs = tx .db @@ -497,7 +522,17 @@ pub fn delivery_progress(ctx: &mut ProcedureContext, run_id: String) -> Result { + log::debug!("Successfully finished `delivery_progress` at {start_time:?}"); + } + Err(e) => { + log::error!("Failed `delivery_progress` at {start_time:?}: {e}"); + } + } + res } #[procedure] @@ -507,7 +542,9 @@ pub fn fetch_delivery_completions( after_completion_id: u64, limit: u32, ) -> Result, String> { - ctx.try_with_tx(|tx| { + let start_time = ctx.timestamp; + log::debug!("Starting `fetch_delivery_completions` at {start_time:?}"); + let res = ctx.try_with_tx(|tx| { let limit = limit as usize; let rows = tx .db @@ -518,7 +555,17 @@ pub fn fetch_delivery_completions( .map(as_delivery_completion_view) .collect(); Ok(rows) - }) + }); + + match &res { + Ok(_) => { + log::debug!("Successfully finished `fetch_delivery_completions` at {start_time:?}"); + } + Err(e) => { + log::error!("Failed `fetch_delivery_completions` at {start_time:?}: {e}"); + } + } + res } #[reducer] diff --git a/modules/tpcc/src/new_order.rs b/modules/tpcc/src/new_order.rs index 1428459d124..07b83ed02aa 100644 --- a/modules/tpcc/src/new_order.rs +++ b/modules/tpcc/src/new_order.rs @@ -50,31 +50,35 @@ pub fn new_order( c_id: 
u32, order_lines: Vec, ) -> Result { - ensure!( - (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), - "district id out of range" - ); - ensure!( - (5..=15).contains(&order_lines.len()), - "new-order requires between 5 and 15 order lines" - ); - - // Setup TX: validate warehouse, district, customer ID. - // NON-CONFORMANT: These never change in TPC-C, - // so we don't need to include the checks in the same transaction as the rest of the work. - let (warehouse, district, customer, spacetimedb_uri) = ctx.try_with_tx(|tx| { - let warehouse = find_warehouse(tx, w_id)?; - let district = find_district(tx, w_id, d_id)?; - let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; - let spacetimedb_uri = get_spacetimedb_uri(tx); - Ok::<_, String>((warehouse, district, customer, spacetimedb_uri)) - })?; - - let PartitionedItems { - local_database_items, - remote_database_items, - all_local_warehouse, - } = + let start_time = ctx.timestamp; + log::debug!("Starting `new_order` transaction at {start_time:?}"); + + let res = (|| { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), + "district id out of range" + ); + ensure!( + (5..=15).contains(&order_lines.len()), + "new-order requires between 5 and 15 order lines" + ); + + // Setup TX: validate warehouse, district, customer ID. + // NON-CONFORMANT: These never change in TPC-C, + // so we don't need to include the checks in the same transaction as the rest of the work. + let (warehouse, district, customer, spacetimedb_uri) = ctx.try_with_tx(|tx| { + let warehouse = find_warehouse(tx, w_id)?; + let district = find_district(tx, w_id, d_id)?; + let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; + let spacetimedb_uri = get_spacetimedb_uri(tx); + Ok::<_, String>((warehouse, district, customer, spacetimedb_uri)) + })?; + + let PartitionedItems { + local_database_items, + remote_database_items, + all_local_warehouse, + } = // Look up all of the items in the order, and fail if any of them doesn't exist. 
// If they all exist, sort them into two groups: // - `local_database_items`, items in warehouses managed by this database. @@ -84,80 +88,91 @@ pub fn new_order( // which updates stock quantities for the local items and records the new order. // In a real system, an item might change between the two, but none of the TPC-C transactions writes to items. // We (ab)use this knowledge to skip compensating for writes to items. - partition_local_from_remote_database_items(ctx, w_id, &order_lines)?; - - // NON-CONFORMANT: We reserve items from the remote database extra-transactionally. - // If our TPC-C transaction fails, we'll roll back those reservations. - // This opens us up to dirty read isolation hazards, - // where a concurrent transaction may observe a change in stock quantity that later rolls back. - // This will never happen with only the TPC-C transactions, - // as stock quantity is only written by the `new_order` transaction, - // and `new_order` can only fail prior to updating the stock quantity, due to non-existent items. - // We (ab)use this knowledge to skip compensating for rollbacks to prevent dirty reads. 
- let remote_item_reservations = reserve_remote_items(ctx, &spacetimedb_uri, d_id, &remote_database_items)?; - - match ctx.try_with_tx(|tx| { - let district = tx - .db - .district() - .district_key() - .find(district.district_key) - .expect("District should not have been removed since we retrieved it last"); - let order_id = district.d_next_o_id; - tx.db.district().district_key().update(District { - d_next_o_id: order_id + 1, - ..district - }); - - let line_results = local_database_items - .iter() - .map(|local_item| claim_stock_for_local_database_item(tx, local_item, d_id)) - .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( - |(remote_item, reserved_item)| remote_item_to_processed_new_order_item(remote_item, reserved_item), - )) - .map(|processed_item| insert_order_line(tx, w_id, d_id, order_id, processed_item)) - .collect::>(); - - let subtotal_cents = line_results.iter().map(|line_result| line_result.amount_cents).sum(); - - let taxed = apply_tax( - subtotal_cents, - i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), - ); - let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); - - Ok(NewOrderResult { - warehouse_tax_bps: warehouse.w_tax_bps, - district_tax_bps: district.d_tax_bps, - customer_discount_bps: customer.c_discount_bps, - customer_last: customer.c_last.clone(), - customer_credit: customer.c_credit.clone(), - order_id, - entry_d: tx.timestamp, - total_amount_cents, - all_local: all_local_warehouse, - lines: line_results, - }) - }) { - Ok(result) => { - confirm_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - &remote_database_items, - remote_item_reservations, + partition_local_from_remote_database_items(ctx, w_id, &order_lines)?; + + // NON-CONFORMANT: We reserve items from the remote database extra-transactionally. + // If our TPC-C transaction fails, we'll roll back those reservations. 
+ // This opens us up to dirty read isolation hazards, + // where a concurrent transaction may observe a change in stock quantity that later rolls back. + // This will never happen with only the TPC-C transactions, + // as stock quantity is only written by the `new_order` transaction, + // and `new_order` can only fail prior to updating the stock quantity, due to non-existent items. + // We (ab)use this knowledge to skip compensating for rollbacks to prevent dirty reads. + let remote_item_reservations = reserve_remote_items(ctx, &spacetimedb_uri, d_id, &remote_database_items)?; + + match ctx.try_with_tx(|tx| { + let district = tx + .db + .district() + .district_key() + .find(district.district_key) + .expect("District should not have been removed since we retrieved it last"); + let order_id = district.d_next_o_id; + tx.db.district().district_key().update(District { + d_next_o_id: order_id + 1, + ..district + }); + + let line_results = local_database_items + .iter() + .map(|local_item| claim_stock_for_local_database_item(tx, local_item, d_id)) + .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( + |(remote_item, reserved_item)| remote_item_to_processed_new_order_item(remote_item, reserved_item), + )) + .map(|processed_item| insert_order_line(tx, w_id, d_id, order_id, processed_item)) + .collect::>(); + + let subtotal_cents = line_results.iter().map(|line_result| line_result.amount_cents).sum(); + + let taxed = apply_tax( + subtotal_cents, + i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), ); - Ok(result) + let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); + + Ok(NewOrderResult { + warehouse_tax_bps: warehouse.w_tax_bps, + district_tax_bps: district.d_tax_bps, + customer_discount_bps: customer.c_discount_bps, + customer_last: customer.c_last.clone(), + customer_credit: customer.c_credit.clone(), + order_id, + entry_d: tx.timestamp, + total_amount_cents, + all_local: all_local_warehouse, + 
lines: line_results, + }) + }) { + Ok(result) => { + confirm_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + &remote_database_items, + remote_item_reservations, + ); + Ok(result) + } + Err(e) => { + rollback_all_remote_item_reservations( + ctx, + &spacetimedb_uri, + &remote_database_items, + remote_item_reservations, + ); + Err(e) + } + } + })(); + + match &res { + Ok(_) => { + log::debug!("Successfully finished `new_order` at {start_time:?}"); } Err(e) => { - rollback_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - &remote_database_items, - remote_item_reservations, - ); - Err(e) + log::error!("Failed `new_order` at {start_time:?}: {e}"); } } + res } struct LocalDatabaseItem { diff --git a/modules/tpcc/src/payment.rs b/modules/tpcc/src/payment.rs index 938e00748ad..ea6add08b5c 100644 --- a/modules/tpcc/src/payment.rs +++ b/modules/tpcc/src/payment.rs @@ -34,91 +34,103 @@ pub fn payment( ) -> Result { let now = ctx.timestamp; - let (warehouse_home, spacetimedb_uri) = - ctx.with_tx(|tx| (remote_warehouse_home(tx, c_w_id), get_spacetimedb_uri(tx))); - let payment_request = PaymentRequest { - terminal_warehouse_id: w_id, - terminal_district_id: d_id, - customer_warehouse_id: c_w_id, - customer_district_id: c_d_id, - customer_selector: customer, - payment_amount_cents, - now, - }; - let customer = match warehouse_home { - None => { - // Customer warehouse is managed by this database. - ctx.try_with_tx(|tx| { - let customer = resolve_customer(tx, c_w_id, c_d_id, &payment_request.customer_selector)?; - Ok::<_, String>(update_customer(tx, &payment_request, customer)) - })? - } - Some(remote_database) => { - // Customer warehouse is managed by a remote database. - // Contact them to update the customer's balance and retrieve their info. - let body = call_remote_function( - ctx, - &spacetimedb_uri, - remote_database, - "process_remote_payment", - payment_request.clone(), - )? 
- .into_string() - .expect("Body should be valid UTF-8"); - let res: SerdeWrapper> = - serde_json::from_str(&body).expect("Response does not conform to expected schema"); - res.0? - } - }; + let res = (|| { + let (warehouse_home, spacetimedb_uri) = + ctx.with_tx(|tx| (remote_warehouse_home(tx, c_w_id), get_spacetimedb_uri(tx))); + let payment_request = PaymentRequest { + terminal_warehouse_id: w_id, + terminal_district_id: d_id, + customer_warehouse_id: c_w_id, + customer_district_id: c_d_id, + customer_selector: customer, + payment_amount_cents, + now, + }; + let customer = match warehouse_home { + None => { + // Customer warehouse is managed by this database. + ctx.try_with_tx(|tx| { + let customer = resolve_customer(tx, c_w_id, c_d_id, &payment_request.customer_selector)?; + Ok::<_, String>(update_customer(tx, &payment_request, customer)) + })? + } + Some(remote_database) => { + // Customer warehouse is managed by a remote database. + // Contact them to update the customer's balance and retrieve their info. + let body = call_remote_function( + ctx, + &spacetimedb_uri, + remote_database, + "process_remote_payment", + payment_request.clone(), + )? + .into_string() + .expect("Body should be valid UTF-8"); + let res: SerdeWrapper> = + serde_json::from_str(&body).expect("Response does not conform to expected schema"); + res.0? 
+ } + }; - ctx.try_with_tx(|tx| { - let warehouse = find_warehouse(tx, payment_request.terminal_warehouse_id)?; - let district = find_district( - tx, - payment_request.terminal_warehouse_id, - payment_request.terminal_district_id, - )?; + ctx.try_with_tx(|tx| { + let warehouse = find_warehouse(tx, payment_request.terminal_warehouse_id)?; + let district = find_district( + tx, + payment_request.terminal_warehouse_id, + payment_request.terminal_district_id, + )?; - tx.db.warehouse().w_id().update(Warehouse { - w_ytd_cents: warehouse.w_ytd_cents + payment_request.payment_amount_cents, - ..warehouse.clone() - }); + tx.db.warehouse().w_id().update(Warehouse { + w_ytd_cents: warehouse.w_ytd_cents + payment_request.payment_amount_cents, + ..warehouse.clone() + }); - tx.db.district().district_key().update(District { - d_ytd_cents: district.d_ytd_cents + payment_request.payment_amount_cents, - ..district.clone() - }); + tx.db.district().district_key().update(District { + d_ytd_cents: district.d_ytd_cents + payment_request.payment_amount_cents, + ..district.clone() + }); - tx.db.history().insert(History { - history_id: 0, - h_c_id: customer.c_id, - h_c_d_id: customer.c_d_id, - h_c_w_id: customer.c_w_id, - h_d_id: payment_request.terminal_district_id, - h_w_id: payment_request.terminal_warehouse_id, - h_date: payment_request.now, - h_amount_cents: payment_request.payment_amount_cents, - h_data: format!("{} {}", warehouse.w_name, district.d_name), - }); + tx.db.history().insert(History { + history_id: 0, + h_c_id: customer.c_id, + h_c_d_id: customer.c_d_id, + h_c_w_id: customer.c_w_id, + h_d_id: payment_request.terminal_district_id, + h_w_id: payment_request.terminal_warehouse_id, + h_date: payment_request.now, + h_amount_cents: payment_request.payment_amount_cents, + h_data: format!("{} {}", warehouse.w_name, district.d_name), + }); - Ok(PaymentResult { - warehouse_name: warehouse.w_name, - district_name: district.d_name, - customer_id: customer.c_id, - customer_first: 
customer.c_first.clone(), - customer_middle: customer.c_middle.clone(), - customer_last: customer.c_last.clone(), - customer_balance_cents: customer.c_balance_cents, - customer_credit: customer.c_credit.clone(), - customer_discount_bps: customer.c_discount_bps, - payment_amount_cents: payment_request.payment_amount_cents, - customer_data: if customer.c_credit == "BC" { - Some(customer.c_data.clone()) - } else { - None - }, + Ok(PaymentResult { + warehouse_name: warehouse.w_name, + district_name: district.d_name, + customer_id: customer.c_id, + customer_first: customer.c_first.clone(), + customer_middle: customer.c_middle.clone(), + customer_last: customer.c_last.clone(), + customer_balance_cents: customer.c_balance_cents, + customer_credit: customer.c_credit.clone(), + customer_discount_bps: customer.c_discount_bps, + payment_amount_cents: payment_request.payment_amount_cents, + customer_data: if customer.c_credit == "BC" { + Some(customer.c_data.clone()) + } else { + None + }, + }) }) - }) + })(); + + match &res { + Ok(_) => { + log::debug!("Successfully finished `payment` at {now:?}"); + } + Err(e) => { + log::error!("Failed `payment` at {now:?}: {e}"); + } + } + res } #[derive(SpacetimeType, Clone)] diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index 74f089d6904..cc129a025e9 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -70,9 +70,23 @@ pub fn call_remote_function( // TODO(auth): include a token. .body(bsatn::to_vec(&arguments).map_err(|e| format!("Failed to BSATN-serialize arguments: {e}"))?) 
.map_err(|e| format!("Error constructing `Request`: {e}"))?; + log::debug!("Sending remote request to run {function_name} on {spacetimedb_uri} / {database_ident}"); match ctx.http.send(request) { - Err(e) => Err(format!("Error sending request to remote database {database_ident} at URI {spacetimedb_uri} to call {function_name}: {e}")), - Ok(response) if response.status() != http::status::StatusCode::OK => Err(format!("Got non-200 response code {} from request to remote database {database_ident} at URI {spacetimedb_uri} when calling {function_name}: {}", response.status(), response.into_body().into_string_lossy())), - Ok(response) => Ok(response.into_body()), + Err(e) => { + let msg = format!("Error sending request to remote database {database_ident} at URI {spacetimedb_uri} to call {function_name}: {e}"); + log::error!("{}", msg); + Err(msg) + } + Ok(response) if response.status() != http::status::StatusCode::OK => { + let msg = format!("Got non-200 response code {} from request to remote database {database_ident} at URI {spacetimedb_uri} when calling {function_name}: {}", response.status(), response.into_body().into_string_lossy()); + log::error!("{}", msg); + Err(msg) + } + Ok(response) => { + log::debug!( + "Got successful response from {spacetimedb_uri} / {database_ident} when running {function_name}" + ); + Ok(response.into_body()) + } } } diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs index 1055d377e5c..0dd6e67cc00 100644 --- a/tools/tpcc-runner/src/client.rs +++ b/tools/tpcc-runner/src/client.rs @@ -51,6 +51,7 @@ impl ModuleClient { self.conn .reducers .set_spacetimedb_uri_then(uri.to_string(), move |_, res| { + log::debug!("Got response from `set_spacetimedb_uri`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -64,6 +65,7 @@ impl ModuleClient { pub fn reset_tpcc(&self) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.reset_tpcc_then(move |_, res| { + log::debug!("Got response 
from `reset_tpcc`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -77,6 +79,7 @@ impl ModuleClient { pub fn load_remote_warehouses(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_remote_warehouses_then(rows, move |_, res| { + log::debug!("Got response from `load_remote_warehouse`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -90,6 +93,7 @@ impl ModuleClient { pub fn load_warehouses(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_warehouses_then(rows, move |_, res| { + log::debug!("Got response from `load_warehouses`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -103,6 +107,7 @@ impl ModuleClient { pub fn load_districts(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_districts_then(rows, move |_, res| { + log::debug!("Got response from `load_districts`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -116,6 +121,7 @@ impl ModuleClient { pub fn load_customers(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_customers_then(rows, move |_, res| { + log::debug!("Got response from `load_customers`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -129,6 +135,7 @@ impl ModuleClient { pub fn load_history(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_history_then(rows, move |_, res| { + log::debug!("Got response from `load_history`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -142,6 +149,7 @@ impl ModuleClient { pub fn load_items(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_items_then(rows, move |_, res| { + log::debug!("Got response from `load_items`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ 
-155,6 +163,7 @@ impl ModuleClient { pub fn load_stocks(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_stocks_then(rows, move |_, res| { + log::debug!("Got response from `load_stocks`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -168,6 +177,7 @@ impl ModuleClient { pub fn load_orders(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_orders_then(rows, move |_, res| { + log::debug!("Got response from `load_orders`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -181,6 +191,7 @@ impl ModuleClient { pub fn load_new_orders(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_new_orders_then(rows, move |_, res| { + log::debug!("Got response from `load_new_orders`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -194,6 +205,7 @@ impl ModuleClient { pub fn load_order_lines(&self, rows: Vec) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.load_order_lines_then(rows, move |_, res| { + log::debug!("Got response from `load_order_lines`: {res:?}"); let _ = tx.send(res); })?; match rx.recv_timeout(self.timeout) { @@ -215,6 +227,7 @@ impl ModuleClient { self.conn .procedures .new_order_then(w_id, d_id, c_id, order_lines, move |_, res| { + log::debug!("Got response from `new_order`: {res:?}"); let _ = tx.send(res); }); match rx.recv_timeout(self.timeout) { @@ -242,6 +255,7 @@ impl ModuleClient { customer, payment_amount_cents, move |_, res| { + log::debug!("Got response from `payment`: {res:?}"); let _ = tx.send(res); }, ); @@ -262,6 +276,7 @@ impl ModuleClient { self.conn .procedures .order_status_then(w_id, d_id, customer, move |_, res| { + log::debug!("Got response from `order_status`: {res:?}"); let _ = tx.send(res); }); match rx.recv_timeout(self.timeout) { @@ -276,6 +291,7 @@ impl ModuleClient { self.conn .procedures .stock_level_then(w_id, 
d_id, threshold, move |_, res| { + log::debug!("Got response from `stock_level`: {res:?}"); let _ = tx.send(res); }); match rx.recv_timeout(self.timeout) { @@ -303,6 +319,7 @@ impl ModuleClient { w_id, carrier_id, move |_, res| { + log::debug!("Got response from `queue_delivery`: {res:?}"); let _ = tx.send(res); }, ); @@ -316,6 +333,7 @@ impl ModuleClient { pub fn delivery_progress(&self, run_id: String) -> Result> { let (tx, rx) = sync_channel(1); self.conn.procedures.delivery_progress_then(run_id, move |_, res| { + log::debug!("Got response from `delivery_progress`: {res:?}"); let _ = tx.send(res); }); match rx.recv_timeout(self.timeout) { @@ -335,6 +353,7 @@ impl ModuleClient { self.conn .procedures .fetch_delivery_completions_then(run_id, after_completion_id, limit, move |_, res| { + log::debug!("Got response from `fetch_delivery_completions`: {res:?}"); let _ = tx.send(res); }); match rx.recv_timeout(self.timeout) { From c089c3adb0ce89e2bde3d143bf1189aaee10a37e Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 12:26:05 -0400 Subject: [PATCH 21/38] Hopefully fix a spurious timeout bug --- tools/tpcc-runner/src/driver.rs | 10 +++-- tools/tpcc-runner/src/topology.rs | 73 +++++++++++++++++++------------ 2 files changed, 51 insertions(+), 32 deletions(-) diff --git a/tools/tpcc-runner/src/driver.rs b/tools/tpcc-runner/src/driver.rs index 09d7723d391..6e5ba57558e 100644 --- a/tools/tpcc-runner/src/driver.rs +++ b/tools/tpcc-runner/src/driver.rs @@ -603,9 +603,13 @@ async fn harvest_delivery_completions( ); } - for (_, client) in clients { - client.shutdown(); - } + // It's not actually correct to shut down the clients here: + // they may still be synchronously waiting for a response to an outstanding TX in another thread, + // and if we shut them down it will never come, meaning they will fail and abort. + // Instead, just let them shut down in their own time. 
+ // for (_, client) in clients { + // client.shutdown(); + // } Ok(()) } diff --git a/tools/tpcc-runner/src/topology.rs b/tools/tpcc-runner/src/topology.rs index 5b0fd5636a3..f4e642b894c 100644 --- a/tools/tpcc-runner/src/topology.rs +++ b/tools/tpcc-runner/src/topology.rs @@ -65,36 +65,51 @@ pub fn required_database_count(warehouse_count: u16, warehouses_per_database: u1 } pub async fn lookup_database_identities(connection: &ConnectionConfig, num_databases: u16) -> Result> { - let client = Client::new(); - let mut identities = Vec::with_capacity(num_databases as usize); - for database_number in 0..num_databases { - let body = client - .get(format!( - "{}/v1/database/{}-{}", - connection.uri, connection.database_prefix, database_number - )) - .send() - .await? - .error_for_status()?; - let obj = match body.json::().await? { - serde_json::Value::Object(obj) => obj, - els => bail!("expected object while resolving database identity, got {els:?}"), - }; - let Some(db_ident) = obj.get("database_identity") else { - bail!("missing database_identity in response {obj:?}"); - }; - let serde_json::Value::Object(ident_obj) = db_ident else { - bail!("expected database_identity object, got {db_ident:?}"); - }; - let Some(ident_str) = ident_obj.get("__identity__") else { - bail!("missing __identity__ in response {ident_obj:?}"); - }; - let serde_json::Value::String(ident_str) = ident_str else { - bail!("expected __identity__ string, got {ident_str:?}"); - }; - identities.push(Identity::from_hex(ident_str)?); + log::info!( + "Looking up identities for {num_databases} at {} / {}-*", + connection.uri, + connection.database_prefix + ); + let result = async { + let client = Client::new(); + let mut identities = Vec::with_capacity(num_databases as usize); + for database_number in 0..num_databases { + let body = client + .get(format!( + "{}/v1/database/{}-{}", + connection.uri, connection.database_prefix, database_number + )) + .send() + .await? 
+ .error_for_status()?; + let obj = match body.json::().await? { + serde_json::Value::Object(obj) => obj, + els => bail!("expected object while resolving database identity, got {els:?}"), + }; + let Some(db_ident) = obj.get("database_identity") else { + bail!("missing database_identity in response {obj:?}"); + }; + let serde_json::Value::Object(ident_obj) = db_ident else { + bail!("expected database_identity object, got {db_ident:?}"); + }; + let Some(ident_str) = ident_obj.get("__identity__") else { + bail!("missing __identity__ in response {ident_obj:?}"); + }; + let serde_json::Value::String(ident_str) = ident_str else { + bail!("expected __identity__ string, got {ident_str:?}"); + }; + identities.push(Identity::from_hex(ident_str)?); + } + Ok(identities) + } + .await; + + match &result { + Ok(_) => log::info!("Successfully got database identities"), + Err(e) => log::error!("Failed to get database identities: {e}"), } - Ok(identities) + + result } fn ensure_warehouses_per_database(warehouses_per_database: u16) -> Result<()> { From 07a71f08f6e9e41ccba51167b405f0e90f9f06f9 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Sat, 28 Mar 2026 10:08:07 -0700 Subject: [PATCH 22/38] Add parallel database loading --- tools/tpcc-runner/README.md | 17 ++++++++++++++ tools/tpcc-runner/src/config.rs | 36 ++++++++++++++++++++++------- tools/tpcc-runner/src/lib.rs | 2 +- tools/tpcc-runner/src/loader.rs | 41 +++++++++++++++++++++++++++++---- 4 files changed, 83 insertions(+), 13 deletions(-) diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md index bafcd5416f8..5a13f453287 100644 --- a/tools/tpcc-runner/README.md +++ b/tools/tpcc-runner/README.md @@ -103,6 +103,22 @@ cargo run --release -p tpcc-runner -- load \ --reset true ``` +To load databases in parallel, add `--load-parallelism `. The loader runs +databases concurrently but still loads each individual database in the normal +table order. If you omit the flag, it defaults to `min(num_databases, 8)`. 
+ +For example, to load those two local databases in parallel: + +```bash +cargo run --release -p tpcc-runner -- load \ + --uri http://127.0.0.1:3000 \ + --database-prefix tpcc \ + --num-databases 2 \ + --warehouses-per-database 1 \ + --load-parallelism 2 \ + --reset true +``` + 5. Run a single local driver against one warehouse: ```bash @@ -195,6 +211,7 @@ timeout_secs = 30 [load] num_databases = 1 warehouses_per_database = 1 +load_parallelism = 1 batch_size = 500 reset = true diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs index 8cae12659e5..61ef3cfff82 100644 --- a/tools/tpcc-runner/src/config.rs +++ b/tools/tpcc-runner/src/config.rs @@ -35,6 +35,7 @@ pub struct LoadConfig { pub connection: ConnectionConfig, pub warehouses_per_database: u16, pub num_databases: u16, + pub load_parallelism: usize, pub batch_size: usize, pub reset: bool, } @@ -76,6 +77,8 @@ pub struct LoadArgs { #[arg(long)] pub warehouses_per_database: Option, #[arg(long)] + pub load_parallelism: Option, + #[arg(long)] pub batch_size: Option, #[arg(long)] pub reset: Option, @@ -168,6 +171,7 @@ struct FileConnectionConfig { struct FileLoadConfig { num_databases: Option, warehouses_per_database: Option, + load_parallelism: Option, batch_size: Option, reset: Option, } @@ -231,17 +235,33 @@ impl ConnectionArgs { } impl LoadArgs { - pub fn resolve(&self, file: &FileConfig) -> LoadConfig { - LoadConfig { + pub fn resolve(&self, file: &FileConfig) -> Result { + let num_databases = self.num_databases.or(file.load.num_databases).unwrap_or(1); + if num_databases == 0 { + bail!("num_databases must be positive"); + } + let warehouses_per_database = self + .warehouses_per_database + .or(file.load.warehouses_per_database) + .unwrap_or(1); + if warehouses_per_database == 0 { + bail!("warehouses_per_database must be positive"); + } + let load_parallelism = self + .load_parallelism + .or(file.load.load_parallelism) + .unwrap_or_else(|| usize::from(num_databases).min(8)); + if 
load_parallelism == 0 { + bail!("load_parallelism must be positive"); + } + Ok(LoadConfig { connection: self.connection.resolve(&file.connection), - num_databases: self.num_databases.or(file.load.num_databases).unwrap_or(1), - warehouses_per_database: self - .warehouses_per_database - .or(file.load.warehouses_per_database) - .unwrap_or(1), + num_databases, + warehouses_per_database, + load_parallelism: load_parallelism.min(usize::from(num_databases)), batch_size: self.batch_size.or(file.load.batch_size).unwrap_or(500), reset: self.reset.or(file.load.reset).unwrap_or(true), - } + }) } } diff --git a/tools/tpcc-runner/src/lib.rs b/tools/tpcc-runner/src/lib.rs index 3139e6983b5..52d39edc61d 100644 --- a/tools/tpcc-runner/src/lib.rs +++ b/tools/tpcc-runner/src/lib.rs @@ -20,7 +20,7 @@ pub async fn run_cli(cli: Cli) -> anyhow::Result<()> { let file_config = FileConfig::load(cli.config.as_deref())?; match cli.command { - Command::Load(args) => loader::run(args.resolve(&file_config)).await, + Command::Load(args) => loader::run(args.resolve(&file_config)?).await, Command::Driver(args) => driver::run(args.resolve(&file_config)?).await, Command::Coordinator(args) => coordinator::run(args.resolve(&file_config)?).await, } diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index 8d21c1c11f3..058e223d46d 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -1,6 +1,7 @@ use anyhow::{Context, Result}; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use std::ops::Range; +use std::thread; use std::time::SystemTime; use crate::client::ModuleClient; @@ -19,15 +20,38 @@ const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; pub async fn run(config: LoadConfig) -> Result<()> { log::info!( - "Loading tpcc dataset into {} databases, all running on {}", + "Loading tpcc dataset into {} databases on {} with parallelism {}", config.num_databases, - config.connection.uri + config.connection.uri, + config.load_parallelism ); 
let topology = DatabaseTopology::for_load(&config).await?; + let chunks = database_number_chunks(config.num_databases, config.load_parallelism); + let mut handles = Vec::with_capacity(chunks.len()); + + for (worker_idx, chunk) in chunks.into_iter().enumerate() { + let config = config.clone(); + let topology = topology.clone(); + let thread_name = format!("tpcc-load-{worker_idx}"); + let handle = thread::Builder::new() + .name(thread_name.clone()) + .spawn(move || -> Result<()> { + for database_number in chunk { + configure_one_database(&config, database_number, &topology)?; + } + Ok(()) + }) + .with_context(|| format!("failed to spawn {thread_name}"))?; + handles.push(handle); + } - for database_number in 0..config.num_databases { - configure_one_database(&config, database_number, &topology)?; + for handle in handles { + match handle.join() { + Ok(Ok(())) => {} + Ok(Err(err)) => return Err(err), + Err(_) => anyhow::bail!("loader worker thread panicked"), + } } log::info!("tpcc load finished"); @@ -35,6 +59,15 @@ pub async fn run(config: LoadConfig) -> Result<()> { Ok(()) } +fn database_number_chunks(num_databases: u16, parallelism: usize) -> Vec> { + let database_numbers: Vec = (0..num_databases).collect(); + let chunk_size = database_numbers.len().div_ceil(parallelism); + database_numbers + .chunks(chunk_size) + .map(|chunk| chunk.to_vec()) + .collect() +} + fn configure_one_database(config: &LoadConfig, database_number: u16, topology: &DatabaseTopology) -> Result<()> { let database = topology.identity_for_database_number(database_number)?; log::info!( From 91ac6aa44a0109f62e2206a44d233cf07319f657 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Sat, 28 Mar 2026 11:41:44 -0700 Subject: [PATCH 23/38] fix delivery; auto-inc its scheduled_id --- modules/tpcc/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index eb146e748f1..566b3c508ef 100644 --- a/modules/tpcc/src/lib.rs +++ 
b/modules/tpcc/src/lib.rs @@ -598,6 +598,7 @@ pub fn run_delivery_job(ctx: &ReducerContext, job: DeliveryJob) -> Result<(), St } else { next_job.next_d_id += 1; next_job.scheduled_at = ctx.timestamp.into(); + next_job.scheduled_id = 0; ctx.db.delivery_job().insert(next_job); } From 4781c22967ad18e8fdc612c001ba5ad2d4ab98be Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 14:44:31 -0400 Subject: [PATCH 24/38] Vibecode: reducer return values. Initial Codex-assissted prototype of reducer return values. Implemented only for Rust modules and the Rust client SDK, plus the HTTP API. --- crates/bindings-macro/src/reducer.rs | 18 ++-- crates/bindings/src/lib.rs | 2 +- crates/bindings/src/rt.rs | 69 ++++++++++----- crates/client-api/src/routes/database.rs | 49 +++++++++-- crates/codegen/src/rust.rs | 19 ++--- crates/core/src/host/module_host.rs | 85 +++++++++++++++++++ .../src/host/wasm_common/module_host_actor.rs | 29 ++++++- .../core/src/host/wasmtime/wasmtime_module.rs | 5 +- crates/lib/src/db/raw_def/v10.rs | 30 ++++++- crates/schema/src/def.rs | 3 + crates/schema/src/def/validate/v10.rs | 21 ++++- crates/schema/src/def/validate/v9.rs | 14 ++- crates/schema/src/error.rs | 5 ++ .../modules/call-reducer-procedure/src/lib.rs | 5 ++ crates/smoketests/tests/smoketests/call.rs | 23 +++++ modules/sdk-test/src/lib.rs | 5 ++ sdks/rust/src/callbacks.rs | 2 +- sdks/rust/src/db_connection.rs | 28 ++++-- .../procedure-client/src/test_handlers.rs | 2 +- .../delete_from_btree_u_32_reducer.rs | 2 +- .../delete_large_table_reducer.rs | 2 +- .../module_bindings/delete_pk_bool_reducer.rs | 3 +- .../delete_pk_connection_id_reducer.rs | 2 +- .../delete_pk_i_128_reducer.rs | 3 +- .../module_bindings/delete_pk_i_16_reducer.rs | 3 +- .../delete_pk_i_256_reducer.rs | 3 +- .../module_bindings/delete_pk_i_32_reducer.rs | 3 +- .../module_bindings/delete_pk_i_64_reducer.rs | 3 +- .../module_bindings/delete_pk_i_8_reducer.rs | 3 +- .../delete_pk_identity_reducer.rs | 2 +- 
.../delete_pk_string_reducer.rs | 2 +- .../delete_pk_u_128_reducer.rs | 3 +- .../module_bindings/delete_pk_u_16_reducer.rs | 3 +- .../delete_pk_u_256_reducer.rs | 3 +- ...lete_pk_u_32_insert_pk_u_32_two_reducer.rs | 2 +- .../module_bindings/delete_pk_u_32_reducer.rs | 3 +- .../delete_pk_u_32_two_reducer.rs | 2 +- .../module_bindings/delete_pk_u_64_reducer.rs | 3 +- .../module_bindings/delete_pk_u_8_reducer.rs | 3 +- .../module_bindings/delete_pk_uuid_reducer.rs | 3 +- .../delete_unique_bool_reducer.rs | 2 +- .../delete_unique_connection_id_reducer.rs | 2 +- .../delete_unique_i_128_reducer.rs | 2 +- .../delete_unique_i_16_reducer.rs | 2 +- .../delete_unique_i_256_reducer.rs | 2 +- .../delete_unique_i_32_reducer.rs | 2 +- .../delete_unique_i_64_reducer.rs | 2 +- .../delete_unique_i_8_reducer.rs | 2 +- .../delete_unique_identity_reducer.rs | 2 +- .../delete_unique_string_reducer.rs | 2 +- .../delete_unique_u_128_reducer.rs | 2 +- .../delete_unique_u_16_reducer.rs | 2 +- .../delete_unique_u_256_reducer.rs | 2 +- .../delete_unique_u_32_reducer.rs | 2 +- .../delete_unique_u_64_reducer.rs | 2 +- .../delete_unique_u_8_reducer.rs | 2 +- .../delete_unique_uuid_reducer.rs | 2 +- .../insert_call_timestamp_reducer.rs | 2 +- .../insert_call_uuid_v_4_reducer.rs | 3 +- .../insert_call_uuid_v_7_reducer.rs | 3 +- ...insert_caller_one_connection_id_reducer.rs | 2 +- .../insert_caller_one_identity_reducer.rs | 2 +- .../insert_caller_pk_connection_id_reducer.rs | 2 +- .../insert_caller_pk_identity_reducer.rs | 2 +- ...ert_caller_unique_connection_id_reducer.rs | 2 +- .../insert_caller_unique_identity_reducer.rs | 2 +- ...insert_caller_vec_connection_id_reducer.rs | 2 +- .../insert_caller_vec_identity_reducer.rs | 2 +- .../insert_into_btree_u_32_reducer.rs | 2 +- ...insert_into_indexed_simple_enum_reducer.rs | 2 +- .../insert_into_pk_btree_u_32_reducer.rs | 2 +- .../insert_large_table_reducer.rs | 2 +- .../insert_one_bool_reducer.rs | 3 +- .../insert_one_byte_struct_reducer.rs | 2 +- 
.../insert_one_connection_id_reducer.rs | 2 +- .../insert_one_enum_with_payload_reducer.rs | 2 +- ...sert_one_every_primitive_struct_reducer.rs | 2 +- .../insert_one_every_vec_struct_reducer.rs | 2 +- .../insert_one_f_32_reducer.rs | 3 +- .../insert_one_f_64_reducer.rs | 3 +- .../insert_one_i_128_reducer.rs | 3 +- .../insert_one_i_16_reducer.rs | 3 +- .../insert_one_i_256_reducer.rs | 3 +- .../insert_one_i_32_reducer.rs | 3 +- .../insert_one_i_64_reducer.rs | 3 +- .../module_bindings/insert_one_i_8_reducer.rs | 3 +- .../insert_one_identity_reducer.rs | 2 +- .../insert_one_simple_enum_reducer.rs | 2 +- .../insert_one_string_reducer.rs | 2 +- .../insert_one_timestamp_reducer.rs | 2 +- .../insert_one_u_128_reducer.rs | 3 +- .../insert_one_u_16_reducer.rs | 3 +- .../insert_one_u_256_reducer.rs | 3 +- .../insert_one_u_32_reducer.rs | 3 +- .../insert_one_u_64_reducer.rs | 3 +- .../module_bindings/insert_one_u_8_reducer.rs | 3 +- .../insert_one_unit_struct_reducer.rs | 2 +- .../insert_one_uuid_reducer.rs | 3 +- ...t_option_every_primitive_struct_reducer.rs | 2 +- .../insert_option_i_32_reducer.rs | 2 +- .../insert_option_identity_reducer.rs | 2 +- .../insert_option_simple_enum_reducer.rs | 2 +- .../insert_option_string_reducer.rs | 2 +- .../insert_option_uuid_reducer.rs | 2 +- .../insert_option_vec_option_i_32_reducer.rs | 2 +- .../module_bindings/insert_pk_bool_reducer.rs | 2 +- .../insert_pk_connection_id_reducer.rs | 2 +- .../insert_pk_i_128_reducer.rs | 2 +- .../module_bindings/insert_pk_i_16_reducer.rs | 2 +- .../insert_pk_i_256_reducer.rs | 2 +- .../module_bindings/insert_pk_i_32_reducer.rs | 2 +- .../module_bindings/insert_pk_i_64_reducer.rs | 2 +- .../module_bindings/insert_pk_i_8_reducer.rs | 2 +- .../insert_pk_identity_reducer.rs | 2 +- .../insert_pk_simple_enum_reducer.rs | 2 +- .../insert_pk_string_reducer.rs | 2 +- .../insert_pk_u_128_reducer.rs | 2 +- .../module_bindings/insert_pk_u_16_reducer.rs | 2 +- .../insert_pk_u_256_reducer.rs | 2 +- 
.../module_bindings/insert_pk_u_32_reducer.rs | 2 +- .../insert_pk_u_32_two_reducer.rs | 2 +- .../module_bindings/insert_pk_u_64_reducer.rs | 2 +- .../module_bindings/insert_pk_u_8_reducer.rs | 2 +- .../module_bindings/insert_pk_uuid_reducer.rs | 2 +- .../insert_primitives_as_strings_reducer.rs | 2 +- ...t_every_primitive_struct_string_reducer.rs | 2 +- .../insert_result_i_32_string_reducer.rs | 2 +- .../insert_result_identity_string_reducer.rs | 2 +- .../insert_result_simple_enum_i_32_reducer.rs | 2 +- .../insert_result_string_i_32_reducer.rs | 2 +- .../insert_result_vec_i_32_string_reducer.rs | 2 +- .../insert_table_holds_table_reducer.rs | 2 +- .../insert_unique_bool_reducer.rs | 2 +- .../insert_unique_connection_id_reducer.rs | 2 +- .../insert_unique_i_128_reducer.rs | 2 +- .../insert_unique_i_16_reducer.rs | 2 +- .../insert_unique_i_256_reducer.rs | 2 +- .../insert_unique_i_32_reducer.rs | 2 +- .../insert_unique_i_64_reducer.rs | 2 +- .../insert_unique_i_8_reducer.rs | 2 +- .../insert_unique_identity_reducer.rs | 2 +- .../insert_unique_string_reducer.rs | 2 +- .../insert_unique_u_128_reducer.rs | 2 +- .../insert_unique_u_16_reducer.rs | 2 +- .../insert_unique_u_256_reducer.rs | 2 +- .../insert_unique_u_32_reducer.rs | 2 +- ...sert_unique_u_32_update_pk_u_32_reducer.rs | 2 +- .../insert_unique_u_64_reducer.rs | 2 +- .../insert_unique_u_8_reducer.rs | 2 +- .../insert_unique_uuid_reducer.rs | 2 +- .../module_bindings/insert_user_reducer.rs | 2 +- .../insert_vec_bool_reducer.rs | 3 +- .../insert_vec_byte_struct_reducer.rs | 2 +- .../insert_vec_connection_id_reducer.rs | 2 +- .../insert_vec_enum_with_payload_reducer.rs | 2 +- ...sert_vec_every_primitive_struct_reducer.rs | 2 +- .../insert_vec_every_vec_struct_reducer.rs | 2 +- .../insert_vec_f_32_reducer.rs | 3 +- .../insert_vec_f_64_reducer.rs | 3 +- .../insert_vec_i_128_reducer.rs | 3 +- .../insert_vec_i_16_reducer.rs | 3 +- .../insert_vec_i_256_reducer.rs | 3 +- .../insert_vec_i_32_reducer.rs | 3 +- 
.../insert_vec_i_64_reducer.rs | 3 +- .../module_bindings/insert_vec_i_8_reducer.rs | 3 +- .../insert_vec_identity_reducer.rs | 2 +- .../insert_vec_simple_enum_reducer.rs | 2 +- .../insert_vec_string_reducer.rs | 2 +- .../insert_vec_timestamp_reducer.rs | 2 +- .../insert_vec_u_128_reducer.rs | 3 +- .../insert_vec_u_16_reducer.rs | 3 +- .../insert_vec_u_256_reducer.rs | 3 +- .../insert_vec_u_32_reducer.rs | 3 +- .../insert_vec_u_64_reducer.rs | 3 +- .../module_bindings/insert_vec_u_8_reducer.rs | 3 +- .../insert_vec_unit_struct_reducer.rs | 2 +- .../insert_vec_uuid_reducer.rs | 3 +- .../test-client/src/module_bindings/mod.rs | 13 ++- .../module_bindings/no_op_succeeds_reducer.rs | 3 +- .../src/module_bindings/return_sum_reducer.rs | 69 +++++++++++++++ .../send_scheduled_message_reducer.rs | 2 +- .../sorted_uuids_insert_reducer.rs | 2 +- .../update_indexed_simple_enum_reducer.rs | 2 +- .../module_bindings/update_pk_bool_reducer.rs | 2 +- .../update_pk_connection_id_reducer.rs | 2 +- .../update_pk_i_128_reducer.rs | 2 +- .../module_bindings/update_pk_i_16_reducer.rs | 2 +- .../update_pk_i_256_reducer.rs | 2 +- .../module_bindings/update_pk_i_32_reducer.rs | 2 +- .../module_bindings/update_pk_i_64_reducer.rs | 2 +- .../module_bindings/update_pk_i_8_reducer.rs | 2 +- .../update_pk_identity_reducer.rs | 2 +- .../update_pk_simple_enum_reducer.rs | 2 +- .../update_pk_string_reducer.rs | 2 +- .../update_pk_u_128_reducer.rs | 2 +- .../module_bindings/update_pk_u_16_reducer.rs | 2 +- .../update_pk_u_256_reducer.rs | 2 +- .../module_bindings/update_pk_u_32_reducer.rs | 2 +- .../update_pk_u_32_two_reducer.rs | 2 +- .../module_bindings/update_pk_u_64_reducer.rs | 2 +- .../module_bindings/update_pk_u_8_reducer.rs | 2 +- .../module_bindings/update_pk_uuid_reducer.rs | 2 +- .../update_unique_bool_reducer.rs | 2 +- .../update_unique_connection_id_reducer.rs | 2 +- .../update_unique_i_128_reducer.rs | 2 +- .../update_unique_i_16_reducer.rs | 2 +- .../update_unique_i_256_reducer.rs | 
2 +- .../update_unique_i_32_reducer.rs | 2 +- .../update_unique_i_64_reducer.rs | 2 +- .../update_unique_i_8_reducer.rs | 2 +- .../update_unique_identity_reducer.rs | 2 +- .../update_unique_string_reducer.rs | 2 +- .../update_unique_u_128_reducer.rs | 2 +- .../update_unique_u_16_reducer.rs | 2 +- .../update_unique_u_256_reducer.rs | 2 +- .../update_unique_u_32_reducer.rs | 2 +- .../update_unique_u_64_reducer.rs | 2 +- .../update_unique_u_8_reducer.rs | 2 +- .../update_unique_uuid_reducer.rs | 2 +- .../tests/test-client/src/pk_test_table.rs | 6 +- .../test-client/src/simple_test_table.rs | 2 +- .../tests/test-client/src/test_handlers.rs | 54 ++++++++++-- .../test-client/src/unique_test_table.rs | 4 +- sdks/rust/tests/test.rs | 12 +++ .../tests/view-client/src/test_handlers.rs | 6 +- .../tests/view-pk-client/src/test_handlers.rs | 6 +- 226 files changed, 746 insertions(+), 285 deletions(-) create mode 100644 sdks/rust/tests/test-client/src/module_bindings/return_sum_reducer.rs diff --git a/crates/bindings-macro/src/reducer.rs b/crates/bindings-macro/src/reducer.rs index ac261ced35f..48c55a62b9b 100644 --- a/crates/bindings-macro/src/reducer.rs +++ b/crates/bindings-macro/src/reducer.rs @@ -122,12 +122,11 @@ pub(crate) fn reducer_impl(args: ReducerArgs, original_function: &ItemFn) -> syn let first_arg_ty = arg_tys.first().into_iter(); let rest_arg_tys = arg_tys.iter().skip(1); - // Extract the return type. - let ret_ty = match &original_function.sig.output { - syn::ReturnType::Default => None, - syn::ReturnType::Type(_, t) => Some(&**t), - } - .into_iter(); + // Extract the return type (defaulting to `()`). 
+ let ret_ty_for_info: syn::Type = match &original_function.sig.output { + syn::ReturnType::Default => syn::parse_quote!(()), + syn::ReturnType::Type(_, t) => (**t).clone(), + }; let register_describer_symbol = format!("__preinit__20_register_describer_{}", reducer_name.value()); @@ -151,7 +150,7 @@ pub(crate) fn reducer_impl(args: ReducerArgs, original_function: &ItemFn) -> syn fn _assert_args #lt_params () #lt_where_clause { #(let _ = <#first_arg_ty as spacetimedb::rt::ReducerContextArg>::_ITEM;)* #(let _ = <#rest_arg_tys as spacetimedb::rt::ReducerArg>::_ITEM;)* - #(let _ = <#ret_ty as spacetimedb::rt::IntoReducerResult>::into_result;)* + let _ = <#ret_ty_for_info as spacetimedb::rt::IntoReducerResult>::into_result; } }; impl #func_name { @@ -168,6 +167,11 @@ pub(crate) fn reducer_impl(args: ReducerArgs, original_function: &ItemFn) -> syn #(const LIFECYCLE: Option = Some(#lifecycle);)* const ARG_NAMES: &'static [Option<&'static str>] = &[#(#opt_arg_names),*]; const INVOKE: Self::Invoke = #func_name::invoke; + fn return_type( + ts: &mut impl spacetimedb::sats::typespace::TypespaceBuilder, + ) -> Option { + Some(<#ret_ty_for_info as spacetimedb::rt::IntoReducerResult>::ok_return_type(ts)) + } } #generate_explicit_names diff --git a/crates/bindings/src/lib.rs b/crates/bindings/src/lib.rs index 9e02a3a97f0..a7b0b1455d1 100644 --- a/crates/bindings/src/lib.rs +++ b/crates/bindings/src/lib.rs @@ -56,7 +56,7 @@ pub use table::{ UniqueColumn, UniqueColumnReadOnly, UniqueConstraintViolation, }; -pub type ReducerResult = core::result::Result<(), Box>; +pub type ReducerResult = core::result::Result>, Box>; pub type ProcedureResult = Vec; diff --git a/crates/bindings/src/rt.rs b/crates/bindings/src/rt.rs index d6d55eba5f4..bf87da4bfb7 100644 --- a/crates/bindings/src/rt.rs +++ b/crates/bindings/src/rt.rs @@ -48,7 +48,7 @@ pub fn invoke_reducer<'a, A: Args<'a>>( reducer: impl Reducer<'a, A>, ctx: &ReducerContext, args: &'a [u8], -) -> Result<(), Box> { +) -> Result>, Box> { 
// Deserialize the arguments from a bsatn encoding. let SerDeArgs(args) = bsatn::from_slice(args).expect("unable to decode args"); @@ -76,8 +76,8 @@ pub fn invoke_procedure<'a, A: Args<'a>, Ret: IntoProcedureResult>( label = "this reducer signature is not valid", note = "", note = "reducer signatures must match the following pattern:", - note = " `Fn(&ReducerContext, [T1, ...]) [-> Result<(), impl Display>]`", - note = "where each `Ti` type implements `SpacetimeType`.", + note = " `Fn(&ReducerContext, [T1, ...]) [-> Result]`", + note = "where each `Ti` and `T` type implements `SpacetimeType`.", note = "" )] pub trait Reducer<'de, A: Args<'de>> { @@ -170,7 +170,7 @@ pub trait FnInfo: ExplicitNames { const INVOKE: Self::Invoke; /// The return type of this function. - /// Currently only implemented for views. + /// Implemented for reducers, procedures, and views. fn return_type(_ts: &mut impl TypespaceBuilder) -> Option { None } @@ -203,23 +203,43 @@ pub trait Args<'de>: Sized { /// A trait of types representing the result of executing a reducer. #[diagnostic::on_unimplemented( message = "`{Self}` is not a valid reducer return type", - note = "reducers cannot return values -- you can only return `()` or `Result<(), impl Display>`" + note = "reducers can return `()` or `Result` where `T: SpacetimeType`" )] pub trait IntoReducerResult { /// Convert the result into form where there is no value /// and the error message is a string. - fn into_result(self) -> Result<(), Box>; + fn into_result(self) -> Result>, Box>; + + /// Return the ok type for schema generation. 
+ fn ok_return_type(ts: &mut impl TypespaceBuilder) -> AlgebraicType; } impl IntoReducerResult for () { #[inline] - fn into_result(self) -> Result<(), Box> { - Ok(self) + fn into_result(self) -> Result>, Box> { + Ok(None) + } + + #[inline] + fn ok_return_type(_ts: &mut impl TypespaceBuilder) -> AlgebraicType { + ProductType::unit().into() } } -impl IntoReducerResult for Result<(), E> { +impl IntoReducerResult for Result +where + T: SpacetimeType + Serialize, + E: fmt::Display, +{ #[inline] - fn into_result(self) -> Result<(), Box> { - self.map_err(|e| e.to_string().into()) + fn into_result(self) -> Result>, Box> { + self.map(|value| { + Some(bsatn::to_vec(&value).expect("Failed to serialize reducer return value")) + }) + .map_err(|e| e.to_string().into()) + } + + #[inline] + fn ok_return_type(ts: &mut impl TypespaceBuilder) -> AlgebraicType { + T::make_type(ts) } } @@ -625,7 +645,7 @@ macro_rules! impl_reducer_procedure_view { Ret: IntoReducerResult { #[allow(non_snake_case)] - fn invoke(&self, ctx: &ReducerContext, args: ($($T,)*)) -> Result<(), Box> { + fn invoke(&self, ctx: &ReducerContext, args: ($($T,)*)) -> Result>, Box> { let ($($T,)*) = args; self(ctx, $($T),*).into_result() } @@ -785,10 +805,15 @@ impl From> for RawIndexAlgorithm { pub fn register_reducer<'a, A: Args<'a>, I: FnInfo>(_: impl Reducer<'a, A>) { register_describer(|module| { let params = A::schema::(&mut module.inner); + let ok_return_type = I::return_type(&mut module.inner).unwrap_or_else(|| ProductType::unit().into()); if let Some(lifecycle) = I::LIFECYCLE { - module.inner.add_lifecycle_reducer(lifecycle, I::NAME, params); + module + .inner + .add_lifecycle_reducer_with_ok_return_type(lifecycle, I::NAME, params, ok_return_type); } else { - module.inner.add_reducer(I::NAME, params); + module + .inner + .add_reducer_with_ok_return_type(I::NAME, params, ok_return_type); } module.reducers.push(I::INVOKE); @@ -978,8 +1003,8 @@ extern "C" fn __describe_module__(description: BytesSink) { /// /// 
The `error` is a `BytesSink`, registered on the host side, /// which can be written to with `bytes_sink_write`. -/// When `error` is written to, -/// it is expected that `HOST_CALL_FAILURE` is returned. +/// On success, reducers write their return value to this sink (if any). +/// On error, reducers write the error message to this sink and return `HOST_CALL_FAILURE`. /// Otherwise, `0` should be returned, i.e., the reducer completed successfully. /// Note that in the future, more failure codes could be supported. #[unsafe(no_mangle)] @@ -1010,8 +1035,8 @@ extern "C" fn __call_reducer__( let reducers = REDUCERS.get().unwrap(); // Dispatch to it with the arguments read. let res = with_read_args(args, |args| reducers[id](&ctx, args)); - // Convert any error message to an error code and writes to the `error` sink. - convert_err_to_errno(res, error) + // Convert any error message to an error code and write return bytes on success. + convert_reducer_result_to_errno(res, error) } /// Reconstruct the `sender_i` args to [`__call_reducer__`] and [`__call_procedure__`] into an [`Identity`]. 
@@ -1038,9 +1063,13 @@ fn reconstruct_connection_id(conn_id_0: u64, conn_id_1: u64) -> Option>, out: BytesSink) -> i16 { +fn convert_reducer_result_to_errno(res: Result>, Box>, out: BytesSink) -> i16 { match res { - Ok(()) => 0, + Ok(Some(bytes)) => { + write_to_sink(out, &bytes); + 0 + } + Ok(None) => 0, Err(msg) => { write_to_sink(out, msg.as_bytes()); errno::HOST_CALL_FAILURE.get() as i16 diff --git a/crates/client-api/src/routes/database.rs b/crates/client-api/src/routes/database.rs index 5b1397e475c..cc7ce935000 100644 --- a/crates/client-api/src/routes/database.rs +++ b/crates/client-api/src/routes/database.rs @@ -37,6 +37,8 @@ use spacetimedb_client_api_messages::name::{ self, DatabaseName, DomainName, MigrationPolicy, PrePublishAutoMigrateResult, PrePublishManualMigrateResult, PrePublishResult, PrettyPrintStyle, PublishOp, PublishResult, }; +use spacetimedb_lib::bsatn; +use spacetimedb_lib::de::DeserializeSeed; use spacetimedb_lib::db::raw_def::v10::RawModuleDefV10; use spacetimedb_lib::db::raw_def::v9::RawModuleDefV9; use spacetimedb_lib::{sats, AlgebraicValue, Hash, ProductValue, Timestamp}; @@ -168,8 +170,9 @@ pub async fn call( .await .map_err(client_connected_error_to_response)?; + let mut reducer_return_value: Option = None; let result = match module - .call_reducer( + .call_reducer_with_return( caller_identity, Some(connection_id), None, @@ -180,7 +183,10 @@ pub async fn call( ) .await { - Ok(rcr) => Ok(CallResult::Reducer(rcr)), + Ok((rcr, return_value)) => { + reducer_return_value = return_value; + Ok(CallResult::Reducer(rcr)) + } Err(ReducerCallError::NoSuchReducer | ReducerCallError::ScheduleReducerNotFound) => { // Not a reducer — try procedure instead match module @@ -202,7 +208,8 @@ pub async fn call( match result { Ok(CallResult::Reducer(result)) => { - let (status, body) = reducer_outcome_response(&owner_identity, &reducer, result.outcome); + let (status, body) = + reducer_outcome_response(&module, &owner_identity, &reducer, result.outcome, 
reducer_return_value)?; Ok(( status, TypedHeader(SpacetimeEnergyUsed(result.energy_used)), @@ -228,19 +235,47 @@ pub async fn call( } fn reducer_outcome_response( + module: &ModuleHost, owner_identity: &Identity, reducer: &str, outcome: ReducerOutcome, -) -> (StatusCode, Box) { + reducer_return_value: Option, +) -> axum::response::Result<(StatusCode, axum::response::Response)> { match outcome { - ReducerOutcome::Committed => (StatusCode::OK, "".into()), + ReducerOutcome::Committed => { + let return_value = match module.info.module_def.reducer_full(reducer) { + Some((_, reducer_def)) => reducer_def.ok_return_type.clone(), + None => { + return Err((StatusCode::NOT_FOUND, format!("No such reducer {reducer}")).into()); + } + }; + + if let Some(bytes) = reducer_return_value.filter(|value| !value.is_empty()) { + let seed = sats::WithTypespace::new(module.info.module_def.typespace(), &return_value); + let mut reader = &bytes[..]; + let value: AlgebraicValue = seed + .deserialize(bsatn::Deserializer::new(&mut reader)) + .map_err(|err| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to decode reducer return value: {err}"), + ) + })?; + Ok(( + StatusCode::OK, + axum::Json(sats::serde::SerdeWrapper(value)).into_response(), + )) + } else { + Ok((StatusCode::OK, "".into_response())) + } + } ReducerOutcome::Failed(errmsg) => { // TODO: different status code? 
this is what cloudflare uses, sorta - (StatusCode::from_u16(530).unwrap(), *errmsg) + Ok((StatusCode::from_u16(530).unwrap(), (*errmsg).into_response())) } ReducerOutcome::BudgetExceeded => { log::warn!("Node's energy budget exceeded for identity: {owner_identity} while executing {reducer}"); - (StatusCode::PAYMENT_REQUIRED, "Module energy budget exhausted.".into()) + Ok((StatusCode::PAYMENT_REQUIRED, "Module energy budget exhausted.".into_response())) } } } diff --git a/crates/codegen/src/rust.rs b/crates/codegen/src/rust.rs index 638d6f29ec8..7305bb6c08a 100644 --- a/crates/codegen/src/rust.rs +++ b/crates/codegen/src/rust.rs @@ -379,13 +379,10 @@ pub(super) fn parse_table_update( out.newline(); - gen_and_print_imports( - module, - out, - &reducer.params_for_generate.elements, - // No need to skip any imports; we're not emitting a type that other modules can import. - &[], - ); + let mut imports = Imports::new(); + gen_imports(&mut imports, &reducer.params_for_generate.elements); + add_one_import(&mut imports, &reducer.ok_return_type_for_generate); + print_imports(module, out, imports); out.newline(); @@ -393,6 +390,7 @@ pub(super) fn parse_table_update( let func_name = reducer_function_name(reducer); let args_type = function_args_type_name(&reducer.accessor_name); let enum_variant_name = reducer_variant_name(&reducer.accessor_name); + let ok_ty_name = type_name(module, &reducer.ok_return_type_for_generate); // Define an "args struct" for the reducer. 
// This is not user-facing (note the `pub(super)` visibility); @@ -484,7 +482,7 @@ pub trait {func_name} {{ fn {func_name}_then( &self, {arglist_no_delimiters} - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + Send + 'static, ) -> __sdk::Result<()>; @@ -494,11 +492,12 @@ impl {func_name} for super::RemoteReducers {{ fn {func_name}_then( &self, {arglist_no_delimiters} - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + Send + 'static, ) -> __sdk::Result<()> {{ - self.imp.invoke_reducer_with_callback({args_type} {{ {arg_names} }}, callback) + self.imp + .invoke_reducer_with_callback::<_, {ok_ty_name}>({args_type} {{ {arg_names} }}, callback) }} }} " diff --git a/crates/core/src/host/module_host.rs b/crates/core/src/host/module_host.rs index 6ed0f0ec97c..929db1b8004 100644 --- a/crates/core/src/host/module_host.rs +++ b/crates/core/src/host/module_host.rs @@ -1605,6 +1605,41 @@ impl ModuleHost { .await? 
} + async fn call_reducer_inner_with_return( + &self, + caller_identity: Identity, + caller_connection_id: Option, + client: Option>, + request_id: Option, + timer: Option, + reducer_id: ReducerId, + reducer_def: &ReducerDef, + args: FunctionArgs, + ) -> Result<(ReducerCallResult, Option), ReducerCallError> { + let args = args + .into_tuple_for_def(&self.info.module_def, reducer_def) + .map_err(InvalidReducerArguments)?; + let caller_connection_id = caller_connection_id.unwrap_or(ConnectionId::ZERO); + let call_reducer_params = CallReducerParams { + timestamp: Timestamp::now(), + caller_identity, + caller_connection_id, + client, + request_id, + timer, + reducer_id, + args, + }; + + self.call( + &reducer_def.name, + call_reducer_params, + async |p, inst| Ok(inst.call_reducer_with_return(p)), + async |p, inst| inst.call_reducer(p).await.map(|res| (res, None)), + ) + .await? + } + pub async fn call_reducer( &self, caller_identity: Identity, @@ -1655,6 +1690,56 @@ impl ModuleHost { res } + pub async fn call_reducer_with_return( + &self, + caller_identity: Identity, + caller_connection_id: Option, + client: Option>, + request_id: Option, + timer: Option, + reducer_name: &str, + args: FunctionArgs, + ) -> Result<(ReducerCallResult, Option), ReducerCallError> { + let res = async { + let (reducer_id, reducer_def) = self + .info + .module_def + .reducer_full(reducer_name) + .ok_or(ReducerCallError::NoSuchReducer)?; + if let Some(lifecycle) = reducer_def.lifecycle { + return Err(ReducerCallError::LifecycleReducer(lifecycle)); + } + + if reducer_def.visibility.is_private() && !self.is_database_owner(caller_identity) { + return Err(ReducerCallError::NoSuchReducer); + } + + self.call_reducer_inner_with_return( + caller_identity, + caller_connection_id, + client, + request_id, + timer, + reducer_id, + reducer_def, + args, + ) + .await + } + .await; + + let log_message = match &res { + Err(ReducerCallError::NoSuchReducer) => Some(no_such_function_log_message("reducer", 
reducer_name)), + Err(ReducerCallError::Args(_)) => Some(args_error_log_message("reducer", reducer_name)), + _ => None, + }; + if let Some(log_message) = log_message { + self.inject_logs(LogLevel::Error, reducer_name, &log_message) + } + + res + } + pub async fn call_view_add_single_subscription( &self, sender: Arc, diff --git a/crates/core/src/host/wasm_common/module_host_actor.rs b/crates/core/src/host/wasm_common/module_host_actor.rs index a711e0f18dc..7898a4f205a 100644 --- a/crates/core/src/host/wasm_common/module_host_actor.rs +++ b/crates/core/src/host/wasm_common/module_host_actor.rs @@ -467,6 +467,12 @@ impl WasmModuleInstance { res } + pub fn call_reducer_with_return(&mut self, params: CallReducerParams) -> (ReducerCallResult, Option) { + let (res, return_value, trapped) = self.call_reducer_with_tx_and_return(None, params); + self.trapped = trapped; + (res, return_value) + } + pub fn clear_all_clients(&self) -> anyhow::Result<()> { self.common.clear_all_clients() } @@ -545,6 +551,17 @@ impl WasmModuleInstance { }) } + fn call_reducer_with_tx_and_return( + &mut self, + tx: Option, + params: CallReducerParams, + ) -> (ReducerCallResult, Option, bool) { + crate::callgrind_flag::invoke_allowing_callgrind(|| { + self.common + .call_reducer_with_tx_and_return(tx, params, &mut self.instance) + }) + } + pub fn call_view(&mut self, cmd: ViewCommand) -> ViewCommandResult { let (res, trapped) = self.common.handle_cmd(cmd, &mut self.instance); self.trapped = trapped; @@ -811,6 +828,16 @@ impl InstanceCommon { params: CallReducerParams, inst: &mut I, ) -> (ReducerCallResult, bool) { + let (res, _return_value, trapped) = self.call_reducer_with_tx_and_return(tx, params, inst); + (res, trapped) + } + + pub(crate) fn call_reducer_with_tx_and_return( + &mut self, + tx: Option, + params: CallReducerParams, + inst: &mut I, + ) -> (ReducerCallResult, Option, bool) { let CallReducerParams { timestamp, caller_identity, @@ -956,7 +983,7 @@ impl InstanceCommon { 
execution_duration: total_duration, }; - (res, trapped) + (res, event.reducer_return_value.clone(), trapped) } fn handle_outer_error(&mut self, energy: &EnergyStats, reducer_name: &str) -> EventStatus { diff --git a/crates/core/src/host/wasmtime/wasmtime_module.rs b/crates/core/src/host/wasmtime/wasmtime_module.rs index 48ac0fe80e2..e456c488a99 100644 --- a/crates/core/src/host/wasmtime/wasmtime_module.rs +++ b/crates/core/src/host/wasmtime/wasmtime_module.rs @@ -15,6 +15,7 @@ use crate::host::wasm_common::*; use crate::replica_context::ReplicaContext; use crate::subscription::module_subscription_manager::TransactionOffset; use crate::util::string_from_utf8_lossy_owned; +use bytes::Bytes; use futures_util::FutureExt; use spacetimedb_datastore::locking_tx_datastore::FuncCallType; use spacetimedb_lib::{bsatn, ConnectionId, Identity, RawModuleDef}; @@ -485,8 +486,8 @@ impl module_host_actor::WasmInstance for WasmtimeInstance { let call_result = call_result .map_err(ExecutionError::Trap) - .and_then(|code| handle_error_sink_code(code, error)) - .map(|_| None); + .and_then(|code| handle_result_sink_code(code, error)) + .map(|bytes| (!bytes.is_empty()).then_some(Bytes::from(bytes))); module_host_actor::ReducerExecuteResult { stats, call_result } } diff --git a/crates/lib/src/db/raw_def/v10.rs b/crates/lib/src/db/raw_def/v10.rs index a801ea286be..72fb77133da 100644 --- a/crates/lib/src/db/raw_def/v10.rs +++ b/crates/lib/src/db/raw_def/v10.rs @@ -943,11 +943,21 @@ impl RawModuleDefV10Builder { /// This is because `SpacetimeType` is not implemented for `ReducerContext`, /// so it can never act like an ordinary argument.) pub fn add_reducer(&mut self, source_name: impl Into, params: ProductType) { + self.add_reducer_with_ok_return_type(source_name, params, reducer_default_ok_return_type()); + } + + /// Add a reducer with an explicit ok return type. 
+ pub fn add_reducer_with_ok_return_type( + &mut self, + source_name: impl Into, + params: ProductType, + ok_return_type: AlgebraicType, + ) { self.reducers_mut().push(RawReducerDefV10 { source_name: source_name.into(), params, visibility: FunctionVisibility::ClientCallable, - ok_return_type: reducer_default_ok_return_type(), + ok_return_type, err_return_type: reducer_default_err_return_type(), }); } @@ -1004,6 +1014,22 @@ impl RawModuleDefV10Builder { lifecycle_spec: Lifecycle, function_name: impl Into, params: ProductType, + ) { + self.add_lifecycle_reducer_with_ok_return_type( + lifecycle_spec, + function_name, + params, + reducer_default_ok_return_type(), + ); + } + + /// Add a lifecycle reducer assignment with an explicit ok return type. + pub fn add_lifecycle_reducer_with_ok_return_type( + &mut self, + lifecycle_spec: Lifecycle, + function_name: impl Into, + params: ProductType, + ok_return_type: AlgebraicType, ) { let function_name = function_name.into(); self.lifecycle_reducers_mut().push(RawLifeCycleReducerDefV10 { @@ -1015,7 +1041,7 @@ impl RawModuleDefV10Builder { source_name: function_name, params, visibility: FunctionVisibility::Private, - ok_return_type: reducer_default_ok_return_type(), + ok_return_type, err_return_type: reducer_default_err_return_type(), }); } diff --git a/crates/schema/src/def.rs b/crates/schema/src/def.rs index 89c201e3f85..574c1cf40bb 100644 --- a/crates/schema/src/def.rs +++ b/crates/schema/src/def.rs @@ -1678,6 +1678,9 @@ pub struct ReducerDef { /// The return type of the reducer on success. pub ok_return_type: AlgebraicType, + /// The return type of the reducer on success, formatted for client codegen. + pub ok_return_type_for_generate: AlgebraicTypeUse, + /// The return type of the reducer on error. 
pub err_return_type: AlgebraicType, } diff --git a/crates/schema/src/def/validate/v10.rs b/crates/schema/src/def/validate/v10.rs index 7ebbaae06d4..13790b54c9a 100644 --- a/crates/schema/src/def/validate/v10.rs +++ b/crates/schema/src/def/validate/v10.rs @@ -531,9 +531,17 @@ impl<'a> ModuleValidatorV10<'a> { arg_name, }); + let ok_return_type_for_generate = self.core.validate_for_type_use( + || TypeLocation::ReducerReturn { + reducer_name: source_name.clone(), + }, + &ok_return_type, + ); + let name_result = self.core.resolve_function_ident(source_name.clone()); - let return_res: Result<_> = (ok_return_type.is_unit() && err_return_type.is_string()) + let return_res: Result<_> = err_return_type + .is_string() .then_some((ok_return_type.clone(), err_return_type.clone())) .ok_or_else(move || { ValidationError::InvalidReducerReturnType { @@ -544,8 +552,14 @@ impl<'a> ModuleValidatorV10<'a> { .into() }); - let (name_result, accessor_name, params_for_generate, return_res) = - (name_result, accessor_name, params_for_generate, return_res).combine_errors()?; + let (name_result, accessor_name, params_for_generate, ok_return_type_for_generate, return_res) = ( + name_result, + accessor_name, + params_for_generate, + ok_return_type_for_generate, + return_res, + ) + .combine_errors()?; let (ok_return_type, err_return_type) = return_res; let reducer_name = ReducerName::new(name_result.clone()); @@ -560,6 +574,7 @@ impl<'a> ModuleValidatorV10<'a> { lifecycle: None, // V10 handles lifecycle separately visibility: visibility.into(), ok_return_type, + ok_return_type_for_generate, err_return_type, }) .map(|reducer_def| (name_result, reducer_def)) diff --git a/crates/schema/src/def/validate/v9.rs b/crates/schema/src/def/validate/v9.rs index d040435afe5..0afd7d64974 100644 --- a/crates/schema/src/def/validate/v9.rs +++ b/crates/schema/src/def/validate/v9.rs @@ -347,6 +347,14 @@ impl ModuleValidatorV9<'_> { arg_name, }); + let ok_return_type = reducer_default_ok_return_type(); + let 
ok_return_type_for_generate = self.core.validate_for_type_use( + || TypeLocation::ReducerReturn { + reducer_name: name.clone(), + }, + &ok_return_type, + ); + // Reducers share the "function namespace" with procedures. // Uniqueness is validated in a later pass, in `check_function_names_are_unique`. let name = identifier(name); @@ -360,7 +368,8 @@ impl ModuleValidatorV9<'_> { Some(_) => Err(ValidationError::DuplicateLifecycle { lifecycle }.into()), }) .transpose(); - let (reducer_name, params_for_generate, lifecycle) = (name, params_for_generate, lifecycle).combine_errors()?; + let (reducer_name, params_for_generate, lifecycle, ok_return_type_for_generate) = + (name, params_for_generate, lifecycle, ok_return_type_for_generate).combine_errors()?; let name = ReducerName::new(reducer_name.clone()); let def = ReducerDef { name: name.clone(), @@ -372,7 +381,8 @@ impl ModuleValidatorV9<'_> { }, lifecycle, visibility: FunctionVisibility::ClientCallable, - ok_return_type: reducer_default_ok_return_type(), + ok_return_type, + ok_return_type_for_generate, err_return_type: reducer_default_err_return_type(), }; Ok((reducer_name, def)) diff --git a/crates/schema/src/error.rs b/crates/schema/src/error.rs index 06f284998b5..df62dadfdff 100644 --- a/crates/schema/src/error.rs +++ b/crates/schema/src/error.rs @@ -195,6 +195,8 @@ pub enum TypeLocation { position: usize, arg_name: Option, }, + /// A reducer return type. + ReducerReturn { reducer_name: RawIdentifier }, /// A procedure argument. 
ProcedureArg { procedure_name: RawIdentifier, @@ -232,6 +234,9 @@ impl fmt::Display for TypeLocation { } Ok(()) } + TypeLocation::ReducerReturn { reducer_name } => { + write!(f, "reducer `{reducer_name}` return value") + } TypeLocation::ProcedureArg { procedure_name, position, diff --git a/crates/smoketests/modules/call-reducer-procedure/src/lib.rs b/crates/smoketests/modules/call-reducer-procedure/src/lib.rs index 5415eb0d3de..f96f77b6e57 100644 --- a/crates/smoketests/modules/call-reducer-procedure/src/lib.rs +++ b/crates/smoketests/modules/call-reducer-procedure/src/lib.rs @@ -10,6 +10,11 @@ pub fn say_hello(_ctx: &ReducerContext) { log::info!("Hello, World!"); } +#[spacetimedb::reducer] +pub fn return_greeting(_ctx: &ReducerContext) -> Result { + Ok("Hello".to_owned()) +} + #[spacetimedb::procedure] pub fn return_person(_ctx: &mut ProcedureContext) -> Person { return Person { diff --git a/crates/smoketests/tests/smoketests/call.rs b/crates/smoketests/tests/smoketests/call.rs index 9c033979bb7..b33d4013e89 100644 --- a/crates/smoketests/tests/smoketests/call.rs +++ b/crates/smoketests/tests/smoketests/call.rs @@ -16,6 +16,27 @@ fn test_call_reducer_procedure() { assert_eq!(msg.trim(), r#"["World"]"#); } +/// Check calling a reducer with an HTTP client returns its value. 
+#[test] +fn test_call_reducer_http_return_value() { + let test = Smoketest::builder() + .precompiled_module("call-reducer-procedure") + .build(); + + let identity = test.database_identity.as_ref().unwrap(); + let response = test + .api_call_json( + "POST", + &format!("/v1/database/{}/call/return_greeting", identity), + "[]", + ) + .unwrap(); + + assert_eq!(response.status_code, 200); + let body = String::from_utf8_lossy(&response.body); + assert_eq!(body.trim(), r#""Hello""#); +} + /// Check calling a non-existent reducer/procedure raises error #[test] fn test_call_errors() { @@ -36,6 +57,7 @@ Error: No such reducer OR procedure `non_existent_reducer` for database `{identi Here are some existing reducers: - say_hello +- return_greeting Here are some existing procedures: - return_person" @@ -58,6 +80,7 @@ Error: No such reducer OR procedure `non_existent_procedure` for database `{iden Here are some existing reducers: - say_hello +- return_greeting Here are some existing procedures: - return_person" diff --git a/modules/sdk-test/src/lib.rs b/modules/sdk-test/src/lib.rs index 2b4a78f9afa..7ada26f93b3 100644 --- a/modules/sdk-test/src/lib.rs +++ b/modules/sdk-test/src/lib.rs @@ -734,6 +734,11 @@ fn insert_call_uuid_v7(ctx: &ReducerContext) { }); } +#[spacetimedb::reducer] +fn return_sum(_ctx: &ReducerContext, a: i32, b: i32) -> Result { + Ok(a + b) +} + #[spacetimedb::reducer] fn insert_primitives_as_strings(ctx: &ReducerContext, s: EveryPrimitiveStruct) { ctx.db.vec_string().insert(VecString { diff --git a/sdks/rust/src/callbacks.rs b/sdks/rust/src/callbacks.rs index a3fb083af1e..e2d2a2174c0 100644 --- a/sdks/rust/src/callbacks.rs +++ b/sdks/rust/src/callbacks.rs @@ -206,7 +206,7 @@ impl TableCallbacks { /// and a wrapper inserted by the SDK will destructure the contained `Event` /// before invoking the user-supplied function. 
pub(crate) type ReducerCallback = Box< - dyn FnOnce(&::ReducerEventContext, Result, InternalError>) + dyn FnOnce(&::ReducerEventContext, Result, InternalError>) + Send + 'static, >; diff --git a/sdks/rust/src/db_connection.rs b/sdks/rust/src/db_connection.rs index 8388945220f..4a3ee02a3e9 100644 --- a/sdks/rust/src/db_connection.rs +++ b/sdks/rust/src/db_connection.rs @@ -194,6 +194,7 @@ impl DbContextImpl { request_id, timestamp, result: Ok(Ok(update)), + ret_value, } => { let (reducer, callback) = { let mut inner = self.inner.lock().unwrap(); @@ -210,7 +211,7 @@ impl DbContextImpl { self.apply_update(update, |_| Event::Reducer(reducer_event.clone())); let reducer_event_ctx = self.make_event_ctx(reducer_event); - callback(&reducer_event_ctx, Ok(Ok(()))); + callback(&reducer_event_ctx, Ok(Ok(ret_value.unwrap_or_default()))); Ok(()) } @@ -221,6 +222,7 @@ impl DbContextImpl { request_id, timestamp, result, + ret_value: _, } => { let (status, result) = match result { Ok(Ok(_)) => { @@ -717,19 +719,28 @@ impl DbContextImpl { } /// Called by autogenerated reducer invocation methods. 
- pub fn invoke_reducer_with_callback( + pub fn invoke_reducer_with_callback( &self, reducer: Args, - callback: impl FnOnce(&::ReducerEventContext, Result, InternalError>) + callback: impl FnOnce(&::ReducerEventContext, Result, InternalError>) + Send + 'static, ) -> crate::Result<()> where ::Reducer: From, + RetVal: for<'a> spacetimedb_lib::de::Deserialize<'a> + 'static, { self.queue_mutation(PendingMutation::InvokeReducerWithCallback { reducer: reducer.into(), - callback: Box::new(callback), + callback: Box::new(move |ctx, result| { + let result = result.map(|inner| { + inner.map(|bytes| { + bsatn::from_slice::(&bytes[..]) + .expect("Failed to BSATN deserialize reducer return value") + }) + }); + callback(ctx, result); + }), }); Ok(()) } @@ -1278,6 +1289,7 @@ enum ParsedMessage { request_id: u32, timestamp: Timestamp, result: Result, InternalError>, + ret_value: Option, }, ProcedureResult { request_id: u32, @@ -1338,20 +1350,18 @@ async fn parse_loop( request_id, timestamp, result: Ok(Ok(M::DbUpdate::default())), + ret_value: Some(Bytes::new()), }, ws::v2::ReducerOutcome::Ok(ws::v2::ReducerOk { ret_value, transaction_update, }) => { - assert!( - ret_value.is_empty(), - "Reducer return value should be unit, i.e. 0 bytes, but got {ret_value:?}" - ); match M::DbUpdate::parse_update(transaction_update) { Ok(db_update) => ParsedMessage::ReducerResult { request_id, timestamp, result: Ok(Ok(db_update)), + ret_value: Some(ret_value), }, // Parse errors are not errors with the reducer call itself, // so they don't go to `ParsedMessage::ReducerResult`. @@ -1368,6 +1378,7 @@ async fn parse_loop( request_id, timestamp, result: Ok(Err(error_message)), + ret_value: None, }, // Parse errors are not errors with the reducer call itself, // so they don't go to `ParsedMessage::ReducerResult`. 
@@ -1384,6 +1395,7 @@ async fn parse_loop( request_id, timestamp, result: Err(InternalError::new(error_message)), + ret_value: None, }, } } diff --git a/sdks/rust/tests/procedure-client/src/test_handlers.rs b/sdks/rust/tests/procedure-client/src/test_handlers.rs index d3f75c0698a..0cd2d7e8a50 100644 --- a/sdks/rust/tests/procedure-client/src/test_handlers.rs +++ b/sdks/rust/tests/procedure-client/src/test_handlers.rs @@ -346,7 +346,7 @@ async fn exec_schedule_procedure(db_name: &str) { sub_applied_nothing_result(assert_all_tables_empty(ctx)); ctx.reducers .schedule_proc_then(|_ctx, outcome| match outcome { - Ok(Ok(())) => (), + Ok(Ok(_)) => (), Ok(Err(msg)) => panic!("`schedule_proc` reducer returned error: {msg}"), Err(internal_error) => panic!("`schedule_proc` reducer panicked: {internal_error:?}"), }) diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_from_btree_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_from_btree_u_32_reducer.rs index 9226501bf4c..2819385ad43 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_from_btree_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_from_btree_u_32_reducer.rs @@ -63,6 +63,6 @@ impl delete_from_btree_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteFromBtreeU32Args { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteFromBtreeU32Args { rows }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_large_table_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_large_table_reducer.rs index 0b1c09573c1..6e14a6a11ed 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_large_table_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_large_table_reducer.rs @@ -201,7 +201,7 @@ impl delete_large_table for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - 
self.imp.invoke_reducer_with_callback( + self.imp.invoke_reducer_with_callback::<_, ()>( DeleteLargeTableArgs { a, b, diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_bool_reducer.rs index 1ce6ec33e92..65d33194478 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_bool_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_bool for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkBoolArgs { b }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkBoolArgs { b }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_connection_id_reducer.rs index 59d95de4314..08eb30b53a3 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_connection_id_reducer.rs @@ -61,6 +61,6 @@ impl delete_pk_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeletePkConnectionIdArgs { a }, callback) + .invoke_reducer_with_callback::<_, ()>(DeletePkConnectionIdArgs { a }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_128_reducer.rs index 248a8ac330e..b18ba7ad20d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_128_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_i_128 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkI128Args { n }, callback) + self.imp + 
.invoke_reducer_with_callback::<_, ()>(DeletePkI128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_16_reducer.rs index 041b396b74d..0b1425e7b99 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_16_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_i_16 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkI16Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkI16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_256_reducer.rs index e19681c31f7..85cf2e383dd 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_256_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_i_256 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkI256Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkI256Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_32_reducer.rs index 5ac86a02c7a..2828c12b586 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_32_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_i_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkI32Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkI32Args { n }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_64_reducer.rs index 237933aa268..bdaa9d003d5 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_64_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_i_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkI64Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkI64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_8_reducer.rs index 9f7f0ea2a13..d6e9120a59a 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_i_8_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_i_8 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkI8Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkI8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_identity_reducer.rs index 6cadf8dc53f..f973e5afdef 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_identity_reducer.rs @@ -61,6 +61,6 @@ impl delete_pk_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeletePkIdentityArgs { i }, callback) + .invoke_reducer_with_callback::<_, ()>(DeletePkIdentityArgs { i }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_string_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_string_reducer.rs index e483266eef7..a6aa387edeb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_string_reducer.rs @@ -61,6 +61,6 @@ impl delete_pk_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeletePkStringArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(DeletePkStringArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_128_reducer.rs index 2f7c9fd30b9..83db211c052 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_128_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_u_128 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkU128Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkU128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_16_reducer.rs index 17c6711556b..afc170fb733 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_16_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_u_16 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkU16Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkU16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_256_reducer.rs index 
2992c118aa7..b20d16cc13f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_256_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_u_256 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkU256Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkU256Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_insert_pk_u_32_two_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_insert_pk_u_32_two_reducer.rs index fac94132b65..895b12cc3e2 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_insert_pk_u_32_two_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_insert_pk_u_32_two_reducer.rs @@ -67,6 +67,6 @@ impl delete_pk_u_32_insert_pk_u_32_two for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeletePkU32InsertPkU32TwoArgs { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(DeletePkU32InsertPkU32TwoArgs { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_reducer.rs index 8facc26002b..c909f24ce95 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_u_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkU32Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkU32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_two_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_two_reducer.rs index fa22d076264..17c1f33cd1d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_two_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_32_two_reducer.rs @@ -61,6 +61,6 @@ impl delete_pk_u_32_two for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeletePkU32TwoArgs { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeletePkU32TwoArgs { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_64_reducer.rs index 5986827f075..fe1f2bcd080 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_64_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_u_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkU64Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkU64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_8_reducer.rs index 387f4b3aa6b..bae1b67e068 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_u_8_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_u_8 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkU8Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkU8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_uuid_reducer.rs index 
1c03d1bf7ae..1d1fe17ab08 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_pk_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_pk_uuid_reducer.rs @@ -60,6 +60,7 @@ impl delete_pk_uuid for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePkUuidArgs { u }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePkUuidArgs { u }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_bool_reducer.rs index 783abd1188d..d62c97a04bb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_bool_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_bool for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueBoolArgs { b }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueBoolArgs { b }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_connection_id_reducer.rs index a2dd91ebbbf..aca8559cccd 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_connection_id_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueConnectionIdArgs { a }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueConnectionIdArgs { a }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_128_reducer.rs index 
90a87721eb6..40acb268ec4 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_128_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_i_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueI128Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueI128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_16_reducer.rs index cefb527055b..6fde94db669 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_16_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_i_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueI16Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueI16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_256_reducer.rs index b83de3f39bc..f67384d87a3 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_256_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_i_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueI256Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueI256Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_32_reducer.rs index ab968509345..eb28ca903b6 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_32_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueI32Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueI32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_64_reducer.rs index f041ba69101..b8961c8cb7b 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_64_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_i_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueI64Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueI64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_8_reducer.rs index 564f3e69b16..2d0dc831643 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_i_8_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_i_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueI8Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueI8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_identity_reducer.rs index daec281853e..f2791804d8a 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_identity_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueIdentityArgs { i }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueIdentityArgs { i }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_string_reducer.rs index 89ac83278e4..6cf1ca1c1cb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_string_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueStringArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueStringArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_128_reducer.rs index 56c0286d6c6..99ff7dd628f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_128_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_u_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueU128Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueU128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_16_reducer.rs index aa282a4fa5d..02cd2f36fc3 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_16_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_u_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueU16Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueU16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_256_reducer.rs index f68566984de..6d67e0facfb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_256_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_u_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueU256Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueU256Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_32_reducer.rs index 1f96bd4fb83..062b78cbbcb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_32_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueU32Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueU32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_64_reducer.rs index 74089d52e98..d146fc6d070 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_64_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_u_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueU64Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueU64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_8_reducer.rs index fc71bcdecff..5d9368c5352 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_u_8_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_u_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueU8Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueU8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_uuid_reducer.rs index 8da17564eeb..ce863737084 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/delete_unique_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/delete_unique_uuid_reducer.rs @@ -61,6 +61,6 @@ impl delete_unique_uuid for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(DeleteUniqueUuidArgs { u }, callback) + .invoke_reducer_with_callback::<_, ()>(DeleteUniqueUuidArgs { u }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_call_timestamp_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_call_timestamp_reducer.rs index 0dbce584b28..a744aeeb0a6 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/insert_call_timestamp_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_call_timestamp_reducer.rs @@ -57,6 +57,6 @@ impl insert_call_timestamp for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallTimestampArgs {}, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallTimestampArgs {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_4_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_4_reducer.rs index 80f8afdfc8f..60c35c0d4e3 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_4_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_4_reducer.rs @@ -56,6 +56,7 @@ impl insert_call_uuid_v_4 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertCallUuidV4Args {}, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertCallUuidV4Args {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_7_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_7_reducer.rs index 354f40175b6..7280bc32a02 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_7_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_call_uuid_v_7_reducer.rs @@ -56,6 +56,7 @@ impl insert_call_uuid_v_7 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertCallUuidV7Args {}, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertCallUuidV7Args {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_connection_id_reducer.rs index 
d71f273acdb..96554f4e225 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_connection_id_reducer.rs @@ -57,6 +57,6 @@ impl insert_caller_one_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallerOneConnectionIdArgs {}, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerOneConnectionIdArgs {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_identity_reducer.rs index c97cfd653d5..1711aca4add 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_one_identity_reducer.rs @@ -57,6 +57,6 @@ impl insert_caller_one_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallerOneIdentityArgs {}, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerOneIdentityArgs {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_connection_id_reducer.rs index d102657a29c..408cc4937cb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_connection_id_reducer.rs @@ -61,6 +61,6 @@ impl insert_caller_pk_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallerPkConnectionIdArgs { data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerPkConnectionIdArgs { data }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_identity_reducer.rs index 6769f59d765..b4f7e3ba3e7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_pk_identity_reducer.rs @@ -61,6 +61,6 @@ impl insert_caller_pk_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallerPkIdentityArgs { data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerPkIdentityArgs { data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_connection_id_reducer.rs index 53602ab271f..abe39120157 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_connection_id_reducer.rs @@ -61,6 +61,6 @@ impl insert_caller_unique_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallerUniqueConnectionIdArgs { data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerUniqueConnectionIdArgs { data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_identity_reducer.rs index ed6f5d40352..99112adaa14 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_unique_identity_reducer.rs @@ -61,6 +61,6 @@ impl insert_caller_unique_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - 
.invoke_reducer_with_callback(InsertCallerUniqueIdentityArgs { data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerUniqueIdentityArgs { data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_connection_id_reducer.rs index 469caef7afd..be05b8c2e59 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_connection_id_reducer.rs @@ -57,6 +57,6 @@ impl insert_caller_vec_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallerVecConnectionIdArgs {}, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerVecConnectionIdArgs {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_identity_reducer.rs index 8e7b4a4060c..3b01a994c90 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_caller_vec_identity_reducer.rs @@ -57,6 +57,6 @@ impl insert_caller_vec_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertCallerVecIdentityArgs {}, callback) + .invoke_reducer_with_callback::<_, ()>(InsertCallerVecIdentityArgs {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_into_btree_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_into_btree_u_32_reducer.rs index f8cc50d96b8..dfb31250b3d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_into_btree_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_into_btree_u_32_reducer.rs @@ -63,6 
+63,6 @@ impl insert_into_btree_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertIntoBtreeU32Args { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertIntoBtreeU32Args { rows }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_into_indexed_simple_enum_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_into_indexed_simple_enum_reducer.rs index fad9ba61ef0..cf0e710badb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_into_indexed_simple_enum_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_into_indexed_simple_enum_reducer.rs @@ -63,6 +63,6 @@ impl insert_into_indexed_simple_enum for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertIntoIndexedSimpleEnumArgs { n }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertIntoIndexedSimpleEnumArgs { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_into_pk_btree_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_into_pk_btree_u_32_reducer.rs index 44d97c097d1..e65f1c292ef 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_into_pk_btree_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_into_pk_btree_u_32_reducer.rs @@ -70,6 +70,6 @@ impl insert_into_pk_btree_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertIntoPkBtreeU32Args { pk_u_32, bt_u_32 }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertIntoPkBtreeU32Args { pk_u_32, bt_u_32 }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_large_table_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_large_table_reducer.rs index c321f755504..e0afd7f4491 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/insert_large_table_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_large_table_reducer.rs @@ -201,7 +201,7 @@ impl insert_large_table for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback( + self.imp.invoke_reducer_with_callback::<_, ()>( InsertLargeTableArgs { a, b, diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_bool_reducer.rs index ff421f17294..b23bad5538e 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_bool_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_bool for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneBoolArgs { b }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneBoolArgs { b }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_byte_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_byte_struct_reducer.rs index 18ddaf6a7a3..d63569249e9 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_byte_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_byte_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_one_byte_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneByteStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneByteStructArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_connection_id_reducer.rs index 77f58cd466d..ed72595f31a 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/insert_one_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_connection_id_reducer.rs @@ -61,6 +61,6 @@ impl insert_one_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneConnectionIdArgs { a }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneConnectionIdArgs { a }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_enum_with_payload_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_enum_with_payload_reducer.rs index 4e912217cd6..22295a41afd 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_enum_with_payload_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_enum_with_payload_reducer.rs @@ -63,6 +63,6 @@ impl insert_one_enum_with_payload for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneEnumWithPayloadArgs { e }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneEnumWithPayloadArgs { e }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_primitive_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_primitive_struct_reducer.rs index f1f7fa03a85..7a34d5eb7ce 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_primitive_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_primitive_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_one_every_primitive_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneEveryPrimitiveStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneEveryPrimitiveStructArgs { s }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_vec_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_vec_struct_reducer.rs index 53c36d7d196..3d60c1d7bdc 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_vec_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_every_vec_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_one_every_vec_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneEveryVecStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneEveryVecStructArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_32_reducer.rs index f5808344dfa..4713d0bcf87 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_32_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_f_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneF32Args { f }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneF32Args { f }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_64_reducer.rs index fd7b5dc3a3c..da2abc7ab1d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_f_64_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_f_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneF64Args { f }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneF64Args { f }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_128_reducer.rs index 5a943ca5fdb..3d93f49a82d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_128_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_i_128 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneI128Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneI128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_16_reducer.rs index adf5bff881f..922f34c4b8f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_16_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_i_16 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneI16Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneI16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_256_reducer.rs index 6cf1f1bf9f6..33742ba06e4 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_256_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_i_256 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneI256Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneI256Args { n }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_32_reducer.rs index fde2b8a6b70..8b06f0dd068 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_32_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_i_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneI32Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneI32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_64_reducer.rs index 7e19eb7f9a2..9db820ce20c 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_64_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_i_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneI64Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneI64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_8_reducer.rs index c7435966950..cf27e5f6788 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_i_8_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_i_8 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneI8Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneI8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_identity_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/insert_one_identity_reducer.rs index 7f35b55a4eb..717e65de90a 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_identity_reducer.rs @@ -61,6 +61,6 @@ impl insert_one_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneIdentityArgs { i }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneIdentityArgs { i }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_simple_enum_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_simple_enum_reducer.rs index d19062b6976..7c32f63f81f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_simple_enum_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_simple_enum_reducer.rs @@ -63,6 +63,6 @@ impl insert_one_simple_enum for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneSimpleEnumArgs { e }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneSimpleEnumArgs { e }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_string_reducer.rs index 82bfabd0c21..d1319006d65 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_string_reducer.rs @@ -61,6 +61,6 @@ impl insert_one_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneStringArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneStringArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_timestamp_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/insert_one_timestamp_reducer.rs index 18564954411..dc3f40eb03f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_timestamp_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_timestamp_reducer.rs @@ -61,6 +61,6 @@ impl insert_one_timestamp for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneTimestampArgs { t }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneTimestampArgs { t }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_128_reducer.rs index ef1adf709e9..43e7b5f870a 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_128_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_u_128 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneU128Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneU128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_16_reducer.rs index 92c4dd5b88b..d544e83e56d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_16_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_u_16 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneU16Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneU16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_256_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_256_reducer.rs index c364d209aea..385a0258197 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_256_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_u_256 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneU256Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneU256Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_32_reducer.rs index 47a73260a96..6d9413d2dc3 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_32_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_u_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneU32Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneU32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_64_reducer.rs index e13129ee9d5..9218b61d889 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_64_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_u_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneU64Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneU64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_8_reducer.rs 
index 8a48909c164..d814e45c605 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_u_8_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_u_8 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneU8Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneU8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_unit_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_unit_struct_reducer.rs index e8b5b72caef..b5c9e3ef073 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_unit_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_unit_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_one_unit_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOneUnitStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOneUnitStructArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_one_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_one_uuid_reducer.rs index 082fcce0ad6..f1cd2c2b642 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_one_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_one_uuid_reducer.rs @@ -60,6 +60,7 @@ impl insert_one_uuid for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertOneUuidArgs { u }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertOneUuidArgs { u }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_option_every_primitive_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_option_every_primitive_struct_reducer.rs index 
1b354052adb..b8a753d0665 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_option_every_primitive_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_option_every_primitive_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_option_every_primitive_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOptionEveryPrimitiveStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOptionEveryPrimitiveStructArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_option_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_option_i_32_reducer.rs index a7cff1942b9..7f8f103b186 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_option_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_option_i_32_reducer.rs @@ -61,6 +61,6 @@ impl insert_option_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOptionI32Args { n }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOptionI32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_option_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_option_identity_reducer.rs index f85cb843a86..c477bb4767c 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_option_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_option_identity_reducer.rs @@ -61,6 +61,6 @@ impl insert_option_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOptionIdentityArgs { i }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOptionIdentityArgs { i }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_option_simple_enum_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/insert_option_simple_enum_reducer.rs index b6f3d6e5141..a01580d9713 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_option_simple_enum_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_option_simple_enum_reducer.rs @@ -63,6 +63,6 @@ impl insert_option_simple_enum for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOptionSimpleEnumArgs { e }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOptionSimpleEnumArgs { e }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_option_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_option_string_reducer.rs index 8cf4591b38c..fc536d49b36 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_option_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_option_string_reducer.rs @@ -61,6 +61,6 @@ impl insert_option_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOptionStringArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOptionStringArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_option_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_option_uuid_reducer.rs index 892e2b1c2f1..ec55a2297b7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_option_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_option_uuid_reducer.rs @@ -61,6 +61,6 @@ impl insert_option_uuid for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOptionUuidArgs { u }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOptionUuidArgs { u }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_option_vec_option_i_32_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/insert_option_vec_option_i_32_reducer.rs index 17c031ed031..4dd452a2b7d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_option_vec_option_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_option_vec_option_i_32_reducer.rs @@ -61,6 +61,6 @@ impl insert_option_vec_option_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertOptionVecOptionI32Args { v }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertOptionVecOptionI32Args { v }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_bool_reducer.rs index b4a0a6cf243..aeac3d14277 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_bool_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_bool for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkBoolArgs { b, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkBoolArgs { b, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_connection_id_reducer.rs index 3f96b88ffaa..4d7a7b16e85 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_connection_id_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkConnectionIdArgs { a, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkConnectionIdArgs { a, data }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_128_reducer.rs index 8574ba3791a..8625f2bd512 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_128_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_i_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkI128Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkI128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_16_reducer.rs index 44f591b555a..71ecea6a1f1 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_16_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_i_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkI16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkI16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_256_reducer.rs index 39694d339d4..6b5688f850b 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_256_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_i_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkI256Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkI256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_32_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_32_reducer.rs index b159a60c34c..22a3ddc1e9b 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_32_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkI32Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkI32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_64_reducer.rs index 0dcb42e4076..8d6cdbfa0a8 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_64_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_i_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkI64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkI64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_8_reducer.rs index 9ecb82e5e00..bda267dfaf9 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_i_8_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_i_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkI8Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkI8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_identity_reducer.rs index 22afefb8370..d33384d7354 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_identity_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkIdentityArgs { i, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkIdentityArgs { i, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_simple_enum_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_simple_enum_reducer.rs index be489235314..aeec0c49a29 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_simple_enum_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_simple_enum_reducer.rs @@ -69,6 +69,6 @@ impl insert_pk_simple_enum for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkSimpleEnumArgs { a, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkSimpleEnumArgs { a, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_string_reducer.rs index 474c4a2db12..8185e26a4b2 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_string_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkStringArgs { s, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkStringArgs { s, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_128_reducer.rs index 7553dfc0933..ff9b316505c 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_128_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_u_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkU128Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkU128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_16_reducer.rs index 85d1beff527..68e193dd858 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_16_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_u_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkU16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkU16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_256_reducer.rs index c1f3b1c385f..e41d1181b6a 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_256_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_u_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkU256Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkU256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_reducer.rs index e995b04c589..ea617188225 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_reducer.rs +++ 
b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkU32Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkU32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_two_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_two_reducer.rs index 7919912b9b8..0110c70be30 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_two_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_32_two_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_u_32_two for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkU32TwoArgs { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkU32TwoArgs { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_64_reducer.rs index c8c610f9f5b..0f069cda0f2 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_64_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_u_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkU64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkU64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_8_reducer.rs index 5cc27b447a6..bdd259eae88 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_u_8_reducer.rs @@ -67,6 +67,6 @@ 
impl insert_pk_u_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkU8Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkU8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_uuid_reducer.rs index 7e51921402c..a89aeea3d60 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_pk_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_pk_uuid_reducer.rs @@ -67,6 +67,6 @@ impl insert_pk_uuid for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPkUuidArgs { u, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPkUuidArgs { u, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_primitives_as_strings_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_primitives_as_strings_reducer.rs index 91b570f6f6a..ab2113beda4 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_primitives_as_strings_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_primitives_as_strings_reducer.rs @@ -63,6 +63,6 @@ impl insert_primitives_as_strings for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertPrimitivesAsStringsArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertPrimitivesAsStringsArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_result_every_primitive_struct_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_result_every_primitive_struct_string_reducer.rs index 1d695df665f..5a5ab9af245 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_result_every_primitive_struct_string_reducer.rs +++ 
b/sdks/rust/tests/test-client/src/module_bindings/insert_result_every_primitive_struct_string_reducer.rs @@ -66,6 +66,6 @@ impl insert_result_every_primitive_struct_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertResultEveryPrimitiveStructStringArgs { r }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertResultEveryPrimitiveStructStringArgs { r }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_result_i_32_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_result_i_32_string_reducer.rs index 50f44f3ed20..95779f59eb5 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_result_i_32_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_result_i_32_string_reducer.rs @@ -61,6 +61,6 @@ impl insert_result_i_32_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertResultI32StringArgs { r }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertResultI32StringArgs { r }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_result_identity_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_result_identity_string_reducer.rs index 372cc4295ba..603bb14de24 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_result_identity_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_result_identity_string_reducer.rs @@ -61,6 +61,6 @@ impl insert_result_identity_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertResultIdentityStringArgs { r }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertResultIdentityStringArgs { r }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_result_simple_enum_i_32_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/insert_result_simple_enum_i_32_reducer.rs index 059db78c5b7..33db7d82d7d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_result_simple_enum_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_result_simple_enum_i_32_reducer.rs @@ -63,6 +63,6 @@ impl insert_result_simple_enum_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertResultSimpleEnumI32Args { r }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertResultSimpleEnumI32Args { r }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_result_string_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_result_string_i_32_reducer.rs index c0e548b6698..5911f8cd1e5 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_result_string_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_result_string_i_32_reducer.rs @@ -61,6 +61,6 @@ impl insert_result_string_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertResultStringI32Args { r }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertResultStringI32Args { r }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_result_vec_i_32_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_result_vec_i_32_string_reducer.rs index dab246f8f14..82a370eaf64 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_result_vec_i_32_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_result_vec_i_32_string_reducer.rs @@ -61,6 +61,6 @@ impl insert_result_vec_i_32_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertResultVecI32StringArgs { r }, callback) + .invoke_reducer_with_callback::<_, 
()>(InsertResultVecI32StringArgs { r }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_table_holds_table_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_table_holds_table_reducer.rs index 9a824bc72bf..b4daafc81fe 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_table_holds_table_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_table_holds_table_reducer.rs @@ -67,6 +67,6 @@ impl insert_table_holds_table for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertTableHoldsTableArgs { a, b }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertTableHoldsTableArgs { a, b }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_bool_reducer.rs index 9323417d2e8..f7666b39532 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_bool_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_bool for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueBoolArgs { b, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueBoolArgs { b, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_connection_id_reducer.rs index 7b4784f0618..ea713301211 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_connection_id_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueConnectionIdArgs 
{ a, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueConnectionIdArgs { a, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_128_reducer.rs index 89fea88ae1f..2d603cd5ecd 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_128_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_i_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueI128Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueI128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_16_reducer.rs index 57f0c6568d7..09818f14a56 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_16_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_i_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueI16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueI16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_256_reducer.rs index d42d569dd23..61b83ddb4a7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_256_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_i_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueI256Args { n, data }, 
callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueI256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_32_reducer.rs index 53cf468b9cd..fbb94a9cb07 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_32_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueI32Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueI32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_64_reducer.rs index de8ddd29b70..7051aa69cb2 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_64_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_i_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueI64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueI64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_8_reducer.rs index 4148a6cb964..6cee4a5c8b1 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_i_8_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_i_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueI8Args { n, data }, callback) + 
.invoke_reducer_with_callback::<_, ()>(InsertUniqueI8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_identity_reducer.rs index 2c94e420fcc..74fffb03ac2 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_identity_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueIdentityArgs { i, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueIdentityArgs { i, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_string_reducer.rs index 4485cdcd89b..1cd55c2a1dc 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_string_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueStringArgs { s, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueStringArgs { s, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_128_reducer.rs index e7c833300c7..e9497a22919 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_128_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_u_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueU128Args { n, 
data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueU128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_16_reducer.rs index f639c955749..26c2e6757fe 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_16_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_u_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueU16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueU16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_256_reducer.rs index 05d01e15505..03c3904fe3c 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_256_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_u_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueU256Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueU256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_reducer.rs index c7055279174..ac77c5986ba 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueU32Args { n, data }, callback) + 
.invoke_reducer_with_callback::<_, ()>(InsertUniqueU32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_update_pk_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_update_pk_u_32_reducer.rs index 31f490fcd7a..85bbac40a99 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_update_pk_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_32_update_pk_u_32_reducer.rs @@ -71,6 +71,6 @@ impl insert_unique_u_32_update_pk_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueU32UpdatePkU32Args { n, d_unique, d_pk }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueU32UpdatePkU32Args { n, d_unique, d_pk }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_64_reducer.rs index b3c15576050..e2a214cf5b7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_64_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_u_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueU64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueU64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_8_reducer.rs index 4445e03d0de..7325296a72b 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_u_8_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_u_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp 
- .invoke_reducer_with_callback(InsertUniqueU8Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueU8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_uuid_reducer.rs index 5cf1f39b5c7..c6944741429 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_unique_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_unique_uuid_reducer.rs @@ -67,6 +67,6 @@ impl insert_unique_uuid for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUniqueUuidArgs { u, data }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUniqueUuidArgs { u, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_user_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_user_reducer.rs index 00db4ccf77a..e292418ccc4 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_user_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_user_reducer.rs @@ -67,6 +67,6 @@ impl insert_user for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertUserArgs { name, identity }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertUserArgs { name, identity }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_bool_reducer.rs index 89c0b05d3fd..1e86773acec 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_bool_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_bool for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecBoolArgs { b }, callback) + 
self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecBoolArgs { b }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_byte_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_byte_struct_reducer.rs index be6b94f6473..a77ae19b376 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_byte_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_byte_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_vec_byte_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecByteStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecByteStructArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_connection_id_reducer.rs index 0a4a62253ae..6c4a6cff934 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_connection_id_reducer.rs @@ -61,6 +61,6 @@ impl insert_vec_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecConnectionIdArgs { a }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecConnectionIdArgs { a }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_enum_with_payload_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_enum_with_payload_reducer.rs index c9259b15c05..72be6e80cab 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_enum_with_payload_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_enum_with_payload_reducer.rs @@ -63,6 +63,6 @@ impl insert_vec_enum_with_payload for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - 
.invoke_reducer_with_callback(InsertVecEnumWithPayloadArgs { e }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecEnumWithPayloadArgs { e }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_primitive_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_primitive_struct_reducer.rs index de7134e0ed8..dc963b00ea6 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_primitive_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_primitive_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_vec_every_primitive_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecEveryPrimitiveStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecEveryPrimitiveStructArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_vec_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_vec_struct_reducer.rs index 670a771173a..3f18a7933c9 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_vec_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_every_vec_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_vec_every_vec_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecEveryVecStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecEveryVecStructArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_32_reducer.rs index 6618e92c54c..1288abe681e 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_32_reducer.rs @@ -60,6 +60,7 @@ 
impl insert_vec_f_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecF32Args { f }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecF32Args { f }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_64_reducer.rs index 3e08df60bf5..c3549b9eddb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_f_64_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_f_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecF64Args { f }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecF64Args { f }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_128_reducer.rs index 06658035dfc..6aab2785887 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_128_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_i_128 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecI128Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecI128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_16_reducer.rs index 1a33c41494e..13a0cfe20d5 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_16_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_i_16 for super::RemoteReducers { + Send + 'static, ) -> 
__sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecI16Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecI16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_256_reducer.rs index 3f93417fa08..a55ec00092b 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_256_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_i_256 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecI256Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecI256Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_32_reducer.rs index fb7ec34a86c..6fd3b00b00c 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_32_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_i_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecI32Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecI32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_64_reducer.rs index d0573f68715..15a501c1222 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_64_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_i_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - 
self.imp.invoke_reducer_with_callback(InsertVecI64Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecI64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_8_reducer.rs index f24b4dfecd3..8e58d59ed19 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_i_8_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_i_8 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecI8Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecI8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_identity_reducer.rs index 5c16a18779d..f5ff81990fe 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_identity_reducer.rs @@ -61,6 +61,6 @@ impl insert_vec_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecIdentityArgs { i }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecIdentityArgs { i }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_simple_enum_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_simple_enum_reducer.rs index 9be306e354d..06ebd58b998 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_simple_enum_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_simple_enum_reducer.rs @@ -63,6 +63,6 @@ impl insert_vec_simple_enum for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - 
.invoke_reducer_with_callback(InsertVecSimpleEnumArgs { e }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecSimpleEnumArgs { e }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_string_reducer.rs index c9239addb1b..b75af5db2d7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_string_reducer.rs @@ -61,6 +61,6 @@ impl insert_vec_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecStringArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecStringArgs { s }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_timestamp_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_timestamp_reducer.rs index f0c01b73284..dac2edb0e2f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_timestamp_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_timestamp_reducer.rs @@ -61,6 +61,6 @@ impl insert_vec_timestamp for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecTimestampArgs { t }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecTimestampArgs { t }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_128_reducer.rs index 57e5b6d6685..eb506f4367c 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_128_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_u_128 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecU128Args { n }, 
callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecU128Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_16_reducer.rs index bf678a54a38..d107a436e32 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_16_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_u_16 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecU16Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecU16Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_256_reducer.rs index 77f141c860c..b013745f915 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_256_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_u_256 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecU256Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecU256Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_32_reducer.rs index cb620ec777d..2c56da67a75 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_32_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_u_32 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecU32Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, 
()>(InsertVecU32Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_64_reducer.rs index 013aeab42ad..8144402294e 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_64_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_u_64 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecU64Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecU64Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_8_reducer.rs index dbef8ca1ad9..7cf62e470d6 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_u_8_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_u_8 for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecU8Args { n }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecU8Args { n }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_unit_struct_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_unit_struct_reducer.rs index 2c9b9640a2a..5ded9f1bc45 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_unit_struct_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_unit_struct_reducer.rs @@ -63,6 +63,6 @@ impl insert_vec_unit_struct for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(InsertVecUnitStructArgs { s }, callback) + .invoke_reducer_with_callback::<_, ()>(InsertVecUnitStructArgs { s }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_uuid_reducer.rs index fdbb44a7114..fb87c3e2f9f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/insert_vec_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/insert_vec_uuid_reducer.rs @@ -60,6 +60,7 @@ impl insert_vec_uuid for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(InsertVecUuidArgs { u }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(InsertVecUuidArgs { u }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/mod.rs b/sdks/rust/tests/test-client/src/module_bindings/mod.rs index 50dc1e0cb5a..18e911a9c89 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/mod.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/mod.rs @@ -1,7 +1,7 @@ // THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE // WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. -// This was generated using spacetimedb cli version 2.1.0 (commit 77575596072d271b763513ec1833d4a6e0627aef). +// This was generated using spacetimedb cli version 2.1.0 (commit c089c3adb0ce89e2bde3d143bf1189aaee10a37e). 
#![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; @@ -295,6 +295,7 @@ pub mod result_string_i_32_table; pub mod result_string_i_32_type; pub mod result_vec_i_32_string_table; pub mod result_vec_i_32_string_type; +pub mod return_sum_reducer; pub mod scheduled_table_table; pub mod scheduled_table_type; pub mod send_scheduled_message_reducer; @@ -718,6 +719,7 @@ pub use result_string_i_32_table::*; pub use result_string_i_32_type::ResultStringI32; pub use result_vec_i_32_string_table::*; pub use result_vec_i_32_string_type::ResultVecI32String; +pub use return_sum_reducer::return_sum; pub use scheduled_table_table::*; pub use scheduled_table_type::ScheduledTable; pub use send_scheduled_message_reducer::send_scheduled_message; @@ -1405,6 +1407,10 @@ pub enum Reducer { u: Vec<__sdk::Uuid>, }, NoOpSucceeds, + ReturnSum { + a: i32, + b: i32, + }, SendScheduledMessage { arg: ScheduledTable, }, @@ -1725,6 +1731,7 @@ impl __sdk::Reducer for Reducer { Reducer::InsertVecUnitStruct { .. } => "insert_vec_unit_struct", Reducer::InsertVecUuid { .. } => "insert_vec_uuid", Reducer::NoOpSucceeds => "no_op_succeeds", + Reducer::ReturnSum { .. } => "return_sum", Reducer::SendScheduledMessage { .. } => "send_scheduled_message", Reducer::SortedUuidsInsert => "sorted_uuids_insert", Reducer::UpdateIndexedSimpleEnum { .. 
} => "update_indexed_simple_enum", @@ -2445,6 +2452,10 @@ impl __sdk::Reducer for Reducer { __sats::bsatn::to_vec(&insert_vec_uuid_reducer::InsertVecUuidArgs { u: u.clone() }) } Reducer::NoOpSucceeds => __sats::bsatn::to_vec(&no_op_succeeds_reducer::NoOpSucceedsArgs {}), + Reducer::ReturnSum { a, b } => __sats::bsatn::to_vec(&return_sum_reducer::ReturnSumArgs { + a: a.clone(), + b: b.clone(), + }), Reducer::SendScheduledMessage { arg } => { __sats::bsatn::to_vec(&send_scheduled_message_reducer::SendScheduledMessageArgs { arg: arg.clone() }) } diff --git a/sdks/rust/tests/test-client/src/module_bindings/no_op_succeeds_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/no_op_succeeds_reducer.rs index ed73d11110c..6f6aaafce8b 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/no_op_succeeds_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/no_op_succeeds_reducer.rs @@ -56,6 +56,7 @@ impl no_op_succeeds for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(NoOpSucceedsArgs {}, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(NoOpSucceedsArgs {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/return_sum_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/return_sum_reducer.rs new file mode 100644 index 00000000000..cdb0f28135f --- /dev/null +++ b/sdks/rust/tests/test-client/src/module_bindings/return_sum_reducer.rs @@ -0,0 +1,69 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct ReturnSumArgs { + pub a: i32, + pub b: i32, +} + +impl From for super::Reducer { + fn from(args: ReturnSumArgs) -> Self { + Self::ReturnSum { a: args.a, b: args.b } + } +} + +impl __sdk::InModule for ReturnSumArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `return_sum`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait return_sum { + /// Request that the remote module invoke the reducer `return_sum` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`return_sum:return_sum_then`] to run a callback after the reducer completes. + fn return_sum(&self, a: i32, b: i32) -> __sdk::Result<()> { + self.return_sum_then(a, b, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `return_sum` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn return_sum_then( + &self, + a: i32, + b: i32, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl return_sum for super::RemoteReducers { + fn return_sum_then( + &self, + a: i32, + b: i32, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback::<_, i32>(ReturnSumArgs { a, b }, callback) + } +} diff --git a/sdks/rust/tests/test-client/src/module_bindings/send_scheduled_message_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/send_scheduled_message_reducer.rs index 2c6a5d14efa..62e64ee453d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/send_scheduled_message_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/send_scheduled_message_reducer.rs @@ -63,6 +63,6 @@ impl send_scheduled_message for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(SendScheduledMessageArgs { arg }, callback) + .invoke_reducer_with_callback::<_, ()>(SendScheduledMessageArgs { arg }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/sorted_uuids_insert_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/sorted_uuids_insert_reducer.rs index f20a90b3b89..bc0650ab2dd 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/sorted_uuids_insert_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/sorted_uuids_insert_reducer.rs @@ -57,6 +57,6 @@ impl sorted_uuids_insert for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(SortedUuidsInsertArgs {}, callback) + .invoke_reducer_with_callback::<_, ()>(SortedUuidsInsertArgs {}, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_indexed_simple_enum_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/update_indexed_simple_enum_reducer.rs index a722ed83c33..f89ebe710ea 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_indexed_simple_enum_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_indexed_simple_enum_reducer.rs @@ -66,6 +66,6 @@ impl update_indexed_simple_enum for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateIndexedSimpleEnumArgs { a, b }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateIndexedSimpleEnumArgs { a, b }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_bool_reducer.rs index 2e09d7ec3b5..f7147ed87b7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_bool_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_bool for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkBoolArgs { b, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkBoolArgs { b, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_connection_id_reducer.rs index 4029f1e7b4f..a7cd9042cd4 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_connection_id_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_connection_id for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkConnectionIdArgs { a, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkConnectionIdArgs { a, data }, callback) } } diff --git 
a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_128_reducer.rs index 1e8e2535e90..d3523c8cc3d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_128_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_i_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkI128Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkI128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_16_reducer.rs index ae461fb747e..678a3920229 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_16_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_i_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkI16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkI16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_256_reducer.rs index ef356225f9a..7141c05c6f3 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_256_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_i_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkI256Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkI256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_32_reducer.rs 
b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_32_reducer.rs index 1066832f11b..04e0fe8ae8f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_32_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkI32Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkI32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_64_reducer.rs index 660cd6e11b7..4f9a47ee7eb 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_64_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_i_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkI64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkI64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_8_reducer.rs index b5337123863..25055a24b79 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_i_8_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_i_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkI8Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkI8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_identity_reducer.rs index 52b8781a35e..59ae9fd4910 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/update_pk_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_identity_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkIdentityArgs { i, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkIdentityArgs { i, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_simple_enum_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_simple_enum_reducer.rs index 2717827dc00..30c20c04668 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_simple_enum_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_simple_enum_reducer.rs @@ -69,6 +69,6 @@ impl update_pk_simple_enum for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkSimpleEnumArgs { a, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkSimpleEnumArgs { a, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_string_reducer.rs index 82c39b7a7c5..15493a81bac 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_string_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkStringArgs { s, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkStringArgs { s, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_128_reducer.rs index c8a740af000..5871d1578af 100644 --- 
a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_128_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_u_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkU128Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkU128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_16_reducer.rs index cac9ebcd8b7..2a248a95f80 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_16_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_u_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkU16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkU16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_256_reducer.rs index 194132c02a6..d72f1735427 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_256_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_u_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkU256Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkU256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_reducer.rs index 6f29d4e1b7f..f30011138ab 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_reducer.rs +++ 
b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_u_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkU32Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkU32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_two_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_two_reducer.rs index f7d2a868892..b1fe1730c4f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_two_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_32_two_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_u_32_two for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkU32TwoArgs { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkU32TwoArgs { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_64_reducer.rs index f0765b6ba76..2b2e921f08f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_64_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_u_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkU64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkU64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_8_reducer.rs index 5f878f520ec..82bce227062 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_u_8_reducer.rs @@ -67,6 +67,6 @@ 
impl update_pk_u_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkU8Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkU8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_pk_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_pk_uuid_reducer.rs index f58a6013837..62b255ff822 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_pk_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_pk_uuid_reducer.rs @@ -67,6 +67,6 @@ impl update_pk_uuid for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdatePkUuidArgs { u, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdatePkUuidArgs { u, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_bool_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_bool_reducer.rs index 7ca90908b75..7cab1c9dc0d 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_bool_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_bool_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_bool for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueBoolArgs { b, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueBoolArgs { b, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_connection_id_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_connection_id_reducer.rs index 5e400d60769..1832772f3d4 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_connection_id_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_connection_id_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_connection_id for 
super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueConnectionIdArgs { a, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueConnectionIdArgs { a, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_128_reducer.rs index 9d5e0a18022..77c7b70edb7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_128_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_i_128 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueI128Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueI128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_16_reducer.rs index efa868e5e81..3bc25bbdb53 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_16_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_i_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueI16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueI16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_256_reducer.rs index 3cb6e0a84bf..03b4327231f 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_256_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_i_256 for 
super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueI256Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueI256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_32_reducer.rs index dd026538797..14feb5701ac 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_32_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_i_32 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueI32Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueI32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_64_reducer.rs index 5c76ec62b89..bc0c6f9bd94 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_64_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_i_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueI64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueI64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_8_reducer.rs index 83b9ca8ef7f..d723165eea2 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_i_8_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_i_8 for super::RemoteReducers { + 'static, ) 
-> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueI8Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueI8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_identity_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_identity_reducer.rs index 05f2cad16ec..5a714c71cd7 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_identity_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_identity_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_identity for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueIdentityArgs { i, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueIdentityArgs { i, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_string_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_string_reducer.rs index 53cf5206180..bb17b897b55 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_string_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_string_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_string for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueStringArgs { s, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueStringArgs { s, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_128_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_128_reducer.rs index a43c5fc1cca..448f223c7f5 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_128_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_128_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_u_128 for super::RemoteReducers 
{ + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueU128Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueU128Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_16_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_16_reducer.rs index ebaa1954361..1c8c7012631 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_16_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_16_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_u_16 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueU16Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueU16Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_256_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_256_reducer.rs index 5dcea25d29a..be6386d74a9 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_256_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_256_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_u_256 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueU256Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueU256Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_32_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_32_reducer.rs index 0c18ef1fcb5..dd39d3cd5b0 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_32_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_32_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_u_32 for super::RemoteReducers { + 'static, ) -> 
__sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueU32Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueU32Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_64_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_64_reducer.rs index 65ba07be372..50b2700e530 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_64_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_64_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_u_64 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueU64Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueU64Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_8_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_8_reducer.rs index fde01c71114..02e9379cff3 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_8_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_u_8_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_u_8 for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(UpdateUniqueU8Args { n, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueU8Args { n, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/module_bindings/update_unique_uuid_reducer.rs b/sdks/rust/tests/test-client/src/module_bindings/update_unique_uuid_reducer.rs index a505028fd45..57315601a59 100644 --- a/sdks/rust/tests/test-client/src/module_bindings/update_unique_uuid_reducer.rs +++ b/sdks/rust/tests/test-client/src/module_bindings/update_unique_uuid_reducer.rs @@ -67,6 +67,6 @@ impl update_unique_uuid for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - 
.invoke_reducer_with_callback(UpdateUniqueUuidArgs { u, data }, callback) + .invoke_reducer_with_callback::<_, ()>(UpdateUniqueUuidArgs { u, data }, callback) } } diff --git a/sdks/rust/tests/test-client/src/pk_test_table.rs b/sdks/rust/tests/test-client/src/pk_test_table.rs index 45ee220cc2f..2e869f6da52 100644 --- a/sdks/rust/tests/test-client/src/pk_test_table.rs +++ b/sdks/rust/tests/test-client/src/pk_test_table.rs @@ -157,7 +157,7 @@ macro_rules! impl_pk_test_table { fn insert(ctx: &impl RemoteDbContext, key: Self::PrimaryKey, value: i32) { ctx.reducers().$insert_reducer_then(key, value, |ctx, outcome| { match outcome { - Ok(Ok(())) => assert!(Self::is_insert_reducer_event(&ctx.event.reducer)), + Ok(Ok(_)) => assert!(Self::is_insert_reducer_event(&ctx.event.reducer)), Ok(Err(msg)) => panic!("Insert reducer returned error: {msg}"), Err(internal_error) => panic!("Insert reducer panicked: {internal_error:?}"), } @@ -166,7 +166,7 @@ macro_rules! impl_pk_test_table { fn delete(ctx: &impl RemoteDbContext, key: Self::PrimaryKey) { ctx.reducers().$delete_reducer_then(key, |ctx, outcome| { match outcome { - Ok(Ok(())) => assert!(Self::is_delete_reducer_event(&ctx.event.reducer)), + Ok(Ok(_)) => assert!(Self::is_delete_reducer_event(&ctx.event.reducer)), Ok(Err(msg)) => panic!("Delete reducer returned error: {msg}"), Err(internal_error) => panic!("Delete reducer panicked: {internal_error:?}"), } @@ -175,7 +175,7 @@ macro_rules! 
impl_pk_test_table { fn update(ctx: &impl RemoteDbContext, key: Self::PrimaryKey, new_value: i32) { ctx.reducers().$update_reducer_then(key, new_value, |ctx, outcome| { match outcome { - Ok(Ok(())) => assert!(Self::is_update_reducer_event(&ctx.event.reducer)), + Ok(Ok(_)) => assert!(Self::is_update_reducer_event(&ctx.event.reducer)), Ok(Err(msg)) => panic!("Update reducer returned error: {msg}"), Err(internal_error) => panic!("Update reducer panicked: {internal_error:?}"), } diff --git a/sdks/rust/tests/test-client/src/simple_test_table.rs b/sdks/rust/tests/test-client/src/simple_test_table.rs index 52d6a350194..8d859a56c37 100644 --- a/sdks/rust/tests/test-client/src/simple_test_table.rs +++ b/sdks/rust/tests/test-client/src/simple_test_table.rs @@ -40,7 +40,7 @@ macro_rules! impl_simple_test_table { fn insert(ctx: &impl RemoteDbContext, contents: Self::Contents) { ctx.reducers().$insert_reducer_then(contents, |ctx, outcome| { match outcome { - Ok(Ok(())) => assert!(Self::is_insert_reducer_event(&ctx.event.reducer)), + Ok(Ok(_)) => assert!(Self::is_insert_reducer_event(&ctx.event.reducer)), Ok(Err(msg)) => panic!("Insert reducer returned error: {msg}"), Err(internal_error) => panic!("Insert reducer panicked: {internal_error:?}"), } diff --git a/sdks/rust/tests/test-client/src/test_handlers.rs b/sdks/rust/tests/test-client/src/test_handlers.rs index d83ee46cf4d..483a5717ae4 100644 --- a/sdks/rust/tests/test-client/src/test_handlers.rs +++ b/sdks/rust/tests/test-client/src/test_handlers.rs @@ -75,6 +75,7 @@ pub async fn dispatch(test: &str, db_name: &str) { "on-reducer" => exec_on_reducer(db_name).await, "fail-reducer" => exec_fail_reducer(db_name).await, + "reducer-return-values" => exec_reducer_return_values(db_name).await, "insert-vec" => exec_insert_vec(db_name).await, "insert-option-some" => exec_insert_option_some(db_name).await, @@ -428,17 +429,17 @@ fn subscribe_these_then( .subscribe(queries); } -fn assert_outcome_committed(reducer_name: &'static str, 
outcome: Result, InternalError>) { +fn assert_outcome_committed(reducer_name: &'static str, outcome: Result, InternalError>) { match outcome { - Ok(Ok(())) => (), + Ok(Ok(_)) => (), Ok(Err(msg)) => panic!("`{reducer_name}` reducer returned error: {msg}"), Err(internal_error) => panic!("`{reducer_name}` reducer panicked: {internal_error:?}"), } } -fn reducer_callback_assert_committed( +fn reducer_callback_assert_committed( reducer_name: &'static str, -) -> impl FnOnce(&ReducerEventContext, Result, InternalError>) + Send + 'static { +) -> impl FnOnce(&ReducerEventContext, Result, InternalError>) + Send + 'static { move |_ctx, outcome| assert_outcome_committed(reducer_name, outcome) } @@ -1010,7 +1011,7 @@ async fn exec_on_reducer(db_name: &str) { .insert_one_u_8_then(value, move |ctx, status| { let run_checks = || { match status { - Ok(Ok(())) => {} + Ok(Ok(_)) => {} other => anyhow::bail!("Unexpected status: {other:?}"), } if !matches!(ctx.event.status, Status::Committed) { @@ -1065,7 +1066,7 @@ async fn exec_fail_reducer(db_name: &str) { .insert_pk_u_8_then(key, initial_data, move |ctx, status| { let run_checks = || { match &status { - Ok(Ok(())) => {} + Ok(Ok(_)) => {} other => anyhow::bail!("Expected success but got {other:?}"), } if !matches!(ctx.event.status, Status::Committed) { @@ -1098,7 +1099,7 @@ async fn exec_fail_reducer(db_name: &str) { ctx.reducers .insert_pk_u_8_then(key, fail_data, move |ctx, status| { let run_checks = || { - if let Ok(Ok(())) = &status { + if let Ok(Ok(_)) = &status { anyhow::bail!( "Expected reducer `insert_pk_u_8` to error or panic, but got a successful return" ) @@ -1142,6 +1143,41 @@ async fn exec_fail_reducer(db_name: &str) { test_counter.wait_for_all().await; } +/// This tests that reducers can return values through the callback. 
+async fn exec_reducer_return_values(db_name: &str) { + let test_counter = TestCounter::new(); + let sub_applied_nothing_result = test_counter.add_test("on_subscription_applied_nothing"); + let reducer_result = test_counter.add_test("reducer-return-value"); + + let connection = connect(db_name, &test_counter).await; + + subscribe_all_then(&connection, move |ctx| { + sub_applied_nothing_result(assert_all_tables_empty(ctx)); + + ctx.reducers + .return_sum_then(2, 3, move |ctx, status| { + let run_checks = || { + let value = match status { + Ok(Ok(value)) => value, + other => anyhow::bail!("Unexpected status: {other:?}"), + }; + if value != 5 { + anyhow::bail!("Unexpected return value: expected 5 but found {value}"); + } + if !matches!(ctx.event.status, Status::Committed) { + anyhow::bail!("Unexpected status. Expected Committed but found {:?}", ctx.event.status); + } + Ok(()) + }; + + reducer_result(run_checks()); + }) + .unwrap(); + }); + + test_counter.wait_for_all().await; +} + /// This tests that we can serialize and deserialize `Vec` in various contexts. async fn exec_insert_vec(db_name: &str) { let test_counter = TestCounter::new(); @@ -1960,8 +1996,8 @@ async fn exec_sorted_uuids_insert(db_name: &str) { // Surely it should have some more assertions in it... let run_checks = || { match status { - Ok(Ok(())) => (), - _ => anyhow::bail!("Unexpected status: Expected Ok(Ok(())) but got {status:?}"), + Ok(Ok(_)) => (), + _ => anyhow::bail!("Unexpected status: Expected Ok(Ok(_)) but got {status:?}"), } if !matches!(ctx.event.reducer, Reducer::SortedUuidsInsert) { anyhow::bail!( diff --git a/sdks/rust/tests/test-client/src/unique_test_table.rs b/sdks/rust/tests/test-client/src/unique_test_table.rs index 672565f752e..4a2164b0b90 100644 --- a/sdks/rust/tests/test-client/src/unique_test_table.rs +++ b/sdks/rust/tests/test-client/src/unique_test_table.rs @@ -112,7 +112,7 @@ macro_rules! 
impl_unique_test_table { fn insert(ctx: &impl RemoteDbContext, key: Self::Key, value: i32) { ctx.reducers().$insert_reducer_then(key, value, |ctx, outcome| { match outcome { - Ok(Ok(())) => assert!(Self::is_insert_reducer_event(&ctx.event.reducer)), + Ok(Ok(_)) => assert!(Self::is_insert_reducer_event(&ctx.event.reducer)), Ok(Err(msg)) => panic!("Insert reducer returned error: {msg}"), Err(internal_error) => panic!("Insert reducer panicked: {internal_error:?}"), } @@ -121,7 +121,7 @@ macro_rules! impl_unique_test_table { fn delete(ctx: &impl RemoteDbContext, key: Self::Key) { ctx.reducers().$delete_reducer_then(key, |ctx, outcome| { match outcome { - Ok(Ok(())) => assert!(Self::is_delete_reducer_event(&ctx.event.reducer)), + Ok(Ok(_)) => assert!(Self::is_delete_reducer_event(&ctx.event.reducer)), Ok(Err(msg)) => panic!("Delete reducer returned error: {msg}"), Err(internal_error) => panic!("Delete reducer panicked: {internal_error:?}"), } diff --git a/sdks/rust/tests/test.rs b/sdks/rust/tests/test.rs index 1008d2c54f4..54ba84be242 100644 --- a/sdks/rust/tests/test.rs +++ b/sdks/rust/tests/test.rs @@ -90,6 +90,16 @@ fn platform_test_builder(client_project: &str, run_selector: Option<&str>) -> Te } } +macro_rules! maybe_reducer_return_test { + (rust, $make_test:ident) => { + #[test] + fn reducer_return_values() { + $make_test("reducer-return-values").run(); + } + }; + ($other:ident, $make_test:ident) => {}; +} + macro_rules! declare_tests_with_suffix { ($lang:ident, $suffix:literal) => { mod $lang { @@ -222,6 +232,8 @@ macro_rules! 
declare_tests_with_suffix { make_test("fail-reducer").run(); } + maybe_reducer_return_test!($lang, make_test); + #[test] fn insert_vec() { make_test("insert-vec").run(); diff --git a/sdks/rust/tests/view-client/src/test_handlers.rs b/sdks/rust/tests/view-client/src/test_handlers.rs index 21b9dc43ae7..a47e23f7f8b 100644 --- a/sdks/rust/tests/view-client/src/test_handlers.rs +++ b/sdks/rust/tests/view-client/src/test_handlers.rs @@ -143,11 +143,11 @@ fn put_result(result: &mut Option, res: Result<(), anyhow::Error (result.take().unwrap())(res); } -fn reducer_callback_assert_committed( +fn reducer_callback_assert_committed( reducer_name: &'static str, -) -> impl FnOnce(&ReducerEventContext, Result, InternalError>) + Send + 'static { +) -> impl FnOnce(&ReducerEventContext, Result, InternalError>) + Send + 'static { move |_ctx, outcome| match outcome { - Ok(Ok(())) => (), + Ok(Ok(_)) => (), Ok(Err(msg)) => panic!("`{reducer_name}` reducer returned error: {msg}"), Err(internal_error) => panic!("`{reducer_name}` reducer panicked: {internal_error:?}"), } diff --git a/sdks/rust/tests/view-pk-client/src/test_handlers.rs b/sdks/rust/tests/view-pk-client/src/test_handlers.rs index 4a7de205d38..61031c201ca 100644 --- a/sdks/rust/tests/view-pk-client/src/test_handlers.rs +++ b/sdks/rust/tests/view-pk-client/src/test_handlers.rs @@ -27,11 +27,11 @@ fn put_result(result: &mut Option, res: Result<(), anyhow::Error (result.take().unwrap())(res); } -fn reducer_callback_assert_committed( +fn reducer_callback_assert_committed( reducer_name: &'static str, -) -> impl FnOnce(&ReducerEventContext, Result, InternalError>) + Send + 'static { +) -> impl FnOnce(&ReducerEventContext, Result, InternalError>) + Send + 'static { move |_ctx, outcome| match outcome { - Ok(Ok(())) => (), + Ok(Ok(_)) => (), Ok(Err(msg)) => panic!("`{reducer_name}` reducer returned error: {msg}"), Err(internal_error) => panic!("`{reducer_name}` reducer panicked: {internal_error:?}"), } From 
c59d370bc8612fe451532b5fda821cd40f4f5bdd Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 14:56:33 -0400 Subject: [PATCH 25/38] `cargo fmt` --- crates/bindings/src/rt.rs | 6 ++---- crates/client-api/src/routes/database.rs | 21 +++++++++++---------- crates/smoketests/tests/smoketests/call.rs | 6 +----- 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/crates/bindings/src/rt.rs b/crates/bindings/src/rt.rs index bf87da4bfb7..780e514e7dd 100644 --- a/crates/bindings/src/rt.rs +++ b/crates/bindings/src/rt.rs @@ -231,10 +231,8 @@ where { #[inline] fn into_result(self) -> Result>, Box> { - self.map(|value| { - Some(bsatn::to_vec(&value).expect("Failed to serialize reducer return value")) - }) - .map_err(|e| e.to_string().into()) + self.map(|value| Some(bsatn::to_vec(&value).expect("Failed to serialize reducer return value"))) + .map_err(|e| e.to_string().into()) } #[inline] diff --git a/crates/client-api/src/routes/database.rs b/crates/client-api/src/routes/database.rs index cc7ce935000..2921632d613 100644 --- a/crates/client-api/src/routes/database.rs +++ b/crates/client-api/src/routes/database.rs @@ -38,9 +38,9 @@ use spacetimedb_client_api_messages::name::{ PrePublishResult, PrettyPrintStyle, PublishOp, PublishResult, }; use spacetimedb_lib::bsatn; -use spacetimedb_lib::de::DeserializeSeed; use spacetimedb_lib::db::raw_def::v10::RawModuleDefV10; use spacetimedb_lib::db::raw_def::v9::RawModuleDefV9; +use spacetimedb_lib::de::DeserializeSeed; use spacetimedb_lib::{sats, AlgebraicValue, Hash, ProductValue, Timestamp}; use spacetimedb_schema::auto_migrate::{ MigrationPolicy as SchemaMigrationPolicy, MigrationToken, PrettyPrintStyle as AutoMigratePrettyPrintStyle, @@ -253,14 +253,12 @@ fn reducer_outcome_response( if let Some(bytes) = reducer_return_value.filter(|value| !value.is_empty()) { let seed = sats::WithTypespace::new(module.info.module_def.typespace(), &return_value); let mut reader = &bytes[..]; - let value: AlgebraicValue = seed 
- .deserialize(bsatn::Deserializer::new(&mut reader)) - .map_err(|err| { - ( - StatusCode::INTERNAL_SERVER_ERROR, - format!("Failed to decode reducer return value: {err}"), - ) - })?; + let value: AlgebraicValue = seed.deserialize(bsatn::Deserializer::new(&mut reader)).map_err(|err| { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Failed to decode reducer return value: {err}"), + ) + })?; Ok(( StatusCode::OK, axum::Json(sats::serde::SerdeWrapper(value)).into_response(), @@ -275,7 +273,10 @@ fn reducer_outcome_response( } ReducerOutcome::BudgetExceeded => { log::warn!("Node's energy budget exceeded for identity: {owner_identity} while executing {reducer}"); - Ok((StatusCode::PAYMENT_REQUIRED, "Module energy budget exhausted.".into_response())) + Ok(( + StatusCode::PAYMENT_REQUIRED, + "Module energy budget exhausted.".into_response(), + )) } } } diff --git a/crates/smoketests/tests/smoketests/call.rs b/crates/smoketests/tests/smoketests/call.rs index b33d4013e89..b0d570fc529 100644 --- a/crates/smoketests/tests/smoketests/call.rs +++ b/crates/smoketests/tests/smoketests/call.rs @@ -25,11 +25,7 @@ fn test_call_reducer_http_return_value() { let identity = test.database_identity.as_ref().unwrap(); let response = test - .api_call_json( - "POST", - &format!("/v1/database/{}/call/return_greeting", identity), - "[]", - ) + .api_call_json("POST", &format!("/v1/database/{}/call/return_greeting", identity), "[]") .unwrap(); assert_eq!(response.status_code, 200); From 94ba2e9d8848c7156e6283f801782cf74e086e8d Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 15:00:02 -0400 Subject: [PATCH 26/38] insta --- .../snapshots/codegen__codegen_rust.snap | 36 ++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/crates/codegen/tests/snapshots/codegen__codegen_rust.snap b/crates/codegen/tests/snapshots/codegen__codegen_rust.snap index 0fc95787a01..dabdc6dde06 100644 --- a/crates/codegen/tests/snapshots/codegen__codegen_rust.snap +++ 
b/crates/codegen/tests/snapshots/codegen__codegen_rust.snap @@ -74,7 +74,8 @@ impl add_player for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(AddPlayerArgs { name, }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(AddPlayerArgs { name, }, callback) } } @@ -151,7 +152,8 @@ impl add_private for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(AddPrivateArgs { name, }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(AddPrivateArgs { name, }, callback) } } @@ -233,7 +235,8 @@ age: u8, + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(AddArgs { name, age, }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(AddArgs { name, age, }, callback) } } @@ -304,7 +307,8 @@ impl assert_caller_identity_is_module_identity for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(AssertCallerIdentityIsModuleIdentityArgs { }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(AssertCallerIdentityIsModuleIdentityArgs { }, callback) } } @@ -406,7 +410,8 @@ impl delete_player for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePlayerArgs { id, }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePlayerArgs { id, }, callback) } } @@ -483,7 +488,8 @@ impl delete_players_by_name for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(DeletePlayersByNameArgs { name, }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(DeletePlayersByNameArgs { name, }, callback) } } @@ -709,7 +715,8 @@ impl list_over_age for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(ListOverAgeArgs { age, }, callback) + self.imp + 
.invoke_reducer_with_callback::<_, ()>(ListOverAgeArgs { age, }, callback) } } @@ -780,7 +787,8 @@ impl log_module_identity for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(LogModuleIdentityArgs { }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(LogModuleIdentityArgs { }, callback) } } @@ -2976,7 +2984,8 @@ impl query_private for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(QueryPrivateArgs { }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(QueryPrivateArgs { }, callback) } } @@ -3234,7 +3243,8 @@ impl say_hello for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(SayHelloArgs { }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(SayHelloArgs { }, callback) } } @@ -3452,7 +3462,8 @@ impl test_btree_index_args for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(TestBtreeIndexArgsArgs { }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(TestBtreeIndexArgsArgs { }, callback) } } @@ -3973,7 +3984,8 @@ arg_4: NamespaceTestF, + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(TestArgs { arg, arg_2, arg_3, arg_4, }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(TestArgs { arg, arg_2, arg_3, arg_4, }, callback) } } From 232b7959b2e244ebfcaec1b0f9267e73f86371b5 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Sat, 28 Mar 2026 12:17:06 -0700 Subject: [PATCH 27/38] coordinator handles warehouse assignment for distributed runs --- tools/tpcc-runner/README.md | 30 ++++++++-------- tools/tpcc-runner/src/config.rs | 39 +++++++++++++++++++++ tools/tpcc-runner/src/coordinator.rs | 52 +++++++++++++++++++++++++--- tools/tpcc-runner/src/driver.rs | 30 +++++++++------- tools/tpcc-runner/src/protocol.rs | 12 +++++-- 5 files changed, 129 
insertions(+), 34 deletions(-) diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md index 5a13f453287..9700f8024f2 100644 --- a/tools/tpcc-runner/README.md +++ b/tools/tpcc-runner/README.md @@ -16,9 +16,12 @@ Warehouses are assigned to databases in contiguous ranges: - database `1` owns the next `warehouses_per_database` - and so on -`--warehouses` is the total logical warehouse count in the benchmark. -`--warehouse-start` and `--warehouse-count` define the warehouse slice owned by -one driver. The driver always uses exactly `10` terminals per owned warehouse. +Without a coordinator, `--warehouses` is the total logical warehouse count in +the benchmark, and `--warehouse-start` plus `--warehouse-count` define the +warehouse slice owned by one driver. With a coordinator, the coordinator +assigns each driver its warehouse slice and the database topology, so those +warehouse flags are not needed on the driver command line. The driver always +uses exactly `10` terminals per owned warehouse. For multi-database runs, the `uri` passed to the loader and driver is also stored in the module and used for cross-database HTTP calls. In normal builds, @@ -167,29 +170,26 @@ running the commands below. Start the coordinator: ```bash -cargo run -p tpcc-runner -- coordinator --expected-drivers 2 --warmup-secs 5 --measure-secs 30 +cargo run -p tpcc-runner -- coordinator \ + --expected-drivers 2 \ + --warehouses 2 \ + --warehouses-per-database 1 \ + --warmup-secs 5 \ + --measure-secs 30 ``` -Start each remote driver with a disjoint warehouse slice. This example assumes -two databases with one warehouse each: +Start each remote driver. The coordinator assigns the warehouse slices. 
This +example assumes two databases with one warehouse each: ```bash cargo run --release -p tpcc-runner -- driver \ --uri http://public-server-host:3000 \ --database-prefix tpcc \ - --warehouses 2 \ - --warehouses-per-database 1 \ - --warehouse-start 1 \ - --warehouse-count 1 \ --coordinator-url http://coordinator-host:7878 cargo run --release -p tpcc-runner -- driver \ --uri http://public-server-host:3000 \ --database-prefix tpcc \ - --warehouses 2 \ - --warehouses-per-database 1 \ - --warehouse-start 2 \ - --warehouse-count 1 \ --coordinator-url http://coordinator-host:7878 ``` @@ -231,6 +231,8 @@ think_time_scale = 1.0 run_id = "tpcc-demo" listen = "127.0.0.1:7878" expected_drivers = 2 +warehouses = 2 +warehouses_per_database = 1 warmup_secs = 5 measure_secs = 30 output_dir = "tpcc-results/coordinator" diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs index 61ef3cfff82..843231bd8c5 100644 --- a/tools/tpcc-runner/src/config.rs +++ b/tools/tpcc-runner/src/config.rs @@ -5,6 +5,8 @@ use std::fs; use std::net::SocketAddr; use std::path::{Path, PathBuf}; +use crate::protocol::DriverAssignment; + #[derive(Debug, Parser)] #[command(name = "tpcc-runner")] pub struct Cli { @@ -63,6 +65,8 @@ pub struct CoordinatorConfig { pub run_id: String, pub listen: SocketAddr, pub expected_drivers: usize, + pub warehouses: u16, + pub warehouses_per_database: u16, pub warmup_secs: u64, pub measure_secs: u64, pub output_dir: PathBuf, @@ -125,6 +129,10 @@ pub struct CoordinatorArgs { #[arg(long)] pub expected_drivers: Option, #[arg(long)] + pub warehouses: Option, + #[arg(long)] + pub warehouses_per_database: Option, + #[arg(long)] pub warmup_secs: Option, #[arg(long)] pub measure_secs: Option, @@ -199,6 +207,8 @@ struct FileCoordinatorConfig { run_id: Option, listen: Option, expected_drivers: Option, + warehouses: Option, + warehouses_per_database: Option, warmup_secs: Option, measure_secs: Option, output_dir: Option, @@ -339,6 +349,24 @@ impl 
CoordinatorArgs { if expected_drivers == 0 { bail!("expected_drivers must be positive"); } + let warehouses = self.warehouses.or(file.coordinator.warehouses).unwrap_or(1); + if warehouses == 0 { + bail!("warehouses must be positive"); + } + let warehouses_per_database = self + .warehouses_per_database + .or(file.coordinator.warehouses_per_database) + .unwrap_or(warehouses); + if warehouses_per_database == 0 { + bail!("warehouses_per_database must be positive"); + } + if expected_drivers > usize::from(warehouses) { + bail!( + "expected_drivers {} exceeds total warehouses {}", + expected_drivers, + warehouses + ); + } Ok(CoordinatorConfig { run_id: self .run_id @@ -350,6 +378,8 @@ impl CoordinatorArgs { .or(file.coordinator.listen) .unwrap_or_else(|| "127.0.0.1:7878".parse().expect("hard-coded coordinator address")), expected_drivers, + warehouses, + warehouses_per_database, warmup_secs: self.warmup_secs.or(file.coordinator.warmup_secs).unwrap_or(5), measure_secs: self.measure_secs.or(file.coordinator.measure_secs).unwrap_or(30), output_dir: self @@ -370,6 +400,15 @@ pub fn default_driver_id() -> String { } impl DriverConfig { + pub fn with_assignment(&self, assignment: &DriverAssignment) -> Self { + let mut updated = self.clone(); + updated.warehouse_count = assignment.warehouse_count; + updated.warehouse_start = assignment.warehouse_start; + updated.driver_warehouse_count = assignment.driver_warehouse_count; + updated.warehouses_per_database = assignment.warehouses_per_database; + updated + } + pub fn warehouse_end(&self) -> u16 { self.warehouse_start + self.driver_warehouse_count - 1 } diff --git a/tools/tpcc-runner/src/coordinator.rs b/tools/tpcc-runner/src/coordinator.rs index f9060f5520d..fdfd6473d95 100644 --- a/tools/tpcc-runner/src/coordinator.rs +++ b/tools/tpcc-runner/src/coordinator.rs @@ -10,7 +10,8 @@ use std::sync::Arc; use crate::config::CoordinatorConfig; use crate::protocol::{ - RegisterDriverRequest, RegisterDriverResponse, RunSchedule, 
ScheduleResponse, SubmitSummaryRequest, + DriverAssignment, RegisterDriverRequest, RegisterDriverResponse, RunSchedule, ScheduleResponse, + SubmitSummaryRequest, }; use crate::summary::{aggregate_summaries, now_millis, write_json, AggregateSummary, DriverSummary}; @@ -21,11 +22,16 @@ struct AppState { struct CoordinatorState { config: CoordinatorConfig, - registrations: BTreeMap, + registrations: BTreeMap, + registration_order: Vec, schedule: Option, summaries: BTreeMap, } +struct DriverRegistration { + assignment: DriverAssignment, +} + pub async fn run(config: CoordinatorConfig) -> Result<()> { fs::create_dir_all(&config.output_dir) .with_context(|| format!("failed to create {}", config.output_dir.display()))?; @@ -34,6 +40,7 @@ pub async fn run(config: CoordinatorConfig) -> Result<()> { inner: Arc::new(Mutex::new(CoordinatorState { config: config.clone(), registrations: BTreeMap::new(), + registration_order: Vec::new(), schedule: None, summaries: BTreeMap::new(), })), @@ -57,9 +64,30 @@ async fn register_driver( Json(request): Json, ) -> Json { let mut inner = state.inner.lock(); - inner.registrations.insert(request.driver_id.clone(), request); + let assignment = match inner.registrations.get(&request.driver_id) { + Some(existing) => existing.assignment.clone(), + None => { + if inner.registration_order.len() >= inner.config.expected_drivers { + return Json(RegisterDriverResponse { + accepted: false, + assignment: None, + }); + } + let index = inner.registration_order.len(); + let assignment = assignment_for_index(&inner.config, index); + inner.registration_order.push(request.driver_id.clone()); + inner.registrations.insert( + request.driver_id.clone(), + DriverRegistration { assignment: assignment.clone() }, + ); + assignment + } + }; maybe_create_schedule(&mut inner); - Json(RegisterDriverResponse { accepted: true }) + Json(RegisterDriverResponse { + accepted: true, + assignment: Some(assignment), + }) } async fn get_schedule(State(state): State) -> Json { @@ 
-118,6 +146,22 @@ fn maybe_create_schedule(inner: &mut CoordinatorState) { ); } +fn assignment_for_index(config: &CoordinatorConfig, index: usize) -> DriverAssignment { + let total_warehouses = usize::from(config.warehouses); + let expected_drivers = config.expected_drivers; + let base = total_warehouses / expected_drivers; + let remainder = total_warehouses % expected_drivers; + let driver_warehouse_count = base + usize::from(index < remainder); + let warehouse_start = 1 + (index * base) + index.min(remainder); + + DriverAssignment { + warehouse_count: config.warehouses, + warehouses_per_database: config.warehouses_per_database, + warehouse_start: warehouse_start as u16, + driver_warehouse_count: driver_warehouse_count as u16, + } +} + fn write_aggregate(output_dir: &Path, aggregate: &AggregateSummary) -> Result<()> { let run_dir = output_dir.join(&aggregate.run_id); fs::create_dir_all(&run_dir).with_context(|| format!("failed to create {}", run_dir.display()))?; diff --git a/tools/tpcc-runner/src/driver.rs b/tools/tpcc-runner/src/driver.rs index 6e5ba57558e..fe23caec9bd 100644 --- a/tools/tpcc-runner/src/driver.rs +++ b/tools/tpcc-runner/src/driver.rs @@ -40,7 +40,7 @@ struct TransactionContext<'a> { } pub async fn run(config: DriverConfig) -> Result<()> { - let schedule = resolve_schedule(&config).await?; + let (config, schedule) = resolve_driver_setup(config).await?; let run_id = schedule.run_id.clone(); let output_dir = resolve_output_dir(&config, &run_id); fs::create_dir_all(&output_dir).with_context(|| format!("failed to create {}", output_dir.display()))?; @@ -468,14 +468,11 @@ fn execute_stock_level( }) } -async fn resolve_schedule(config: &DriverConfig) -> Result { +async fn resolve_driver_setup(config: DriverConfig) -> Result<(DriverConfig, RunSchedule)> { if let Some(coordinator_url) = &config.coordinator_url { let client = reqwest::Client::new(); let register = RegisterDriverRequest { driver_id: config.driver_id.clone(), - terminal_start: 
config.terminal_start(), - terminals: config.terminals(), - warehouse_count: config.warehouse_count, }; let response: RegisterDriverResponse = client .post(format!("{}/register", coordinator_url)) @@ -491,6 +488,10 @@ async fn resolve_schedule(config: &DriverConfig) -> Result { if !response.accepted { bail!("coordinator did not accept driver registration"); } + let Some(assignment) = response.assignment else { + bail!("coordinator accepted driver registration without an assignment"); + }; + let config = config.with_assignment(&assignment); loop { let response: ScheduleResponse = client .get(format!("{}/schedule", coordinator_url)) @@ -503,7 +504,7 @@ async fn resolve_schedule(config: &DriverConfig) -> Result { .await .context("failed to decode schedule response")?; if let Some(schedule) = response.schedule { - return Ok(schedule); + return Ok((config, schedule)); } tokio::time::sleep(Duration::from_millis(500)).await; } @@ -513,13 +514,16 @@ async fn resolve_schedule(config: &DriverConfig) -> Result { let warmup_start_ms = crate::summary::now_millis() + 2_000; let measure_start_ms = warmup_start_ms + (config.warmup_secs * 1_000); let measure_end_ms = measure_start_ms + (config.measure_secs * 1_000); - Ok(RunSchedule { - run_id, - warmup_start_ms, - measure_start_ms, - measure_end_ms, - stop_ms: measure_end_ms, - }) + Ok(( + config, + RunSchedule { + run_id, + warmup_start_ms, + measure_start_ms, + measure_end_ms, + stop_ms: measure_end_ms, + }, + )) } async fn harvest_delivery_completions( diff --git a/tools/tpcc-runner/src/protocol.rs b/tools/tpcc-runner/src/protocol.rs index 5d76fb94ea1..5289b531d12 100644 --- a/tools/tpcc-runner/src/protocol.rs +++ b/tools/tpcc-runner/src/protocol.rs @@ -2,17 +2,23 @@ use serde::{Deserialize, Serialize}; use crate::summary::DriverSummary; +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct DriverAssignment { + pub warehouse_count: u16, + pub warehouses_per_database: u16, + pub warehouse_start: u16, + pub 
driver_warehouse_count: u16, +} + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RegisterDriverRequest { pub driver_id: String, - pub terminal_start: u32, - pub terminals: u32, - pub warehouse_count: u16, } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RegisterDriverResponse { pub accepted: bool, + pub assignment: Option, } #[derive(Clone, Debug, Serialize, Deserialize)] From 8ae04d06581f2ad3d77301ab9b8a689b3644f68c Mon Sep 17 00:00:00 2001 From: John Detter <4099508+jdetter@users.noreply.github.com> Date: Sat, 28 Mar 2026 15:57:35 -0500 Subject: [PATCH 28/38] Debug for timeout error --- tools/tpcc-runner/src/client.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs index 0dd6e67cc00..c2ffba7feb3 100644 --- a/tools/tpcc-runner/src/client.rs +++ b/tools/tpcc-runner/src/client.rs @@ -147,6 +147,7 @@ impl ModuleClient { } pub fn load_items(&self, rows: Vec) -> Result<()> { + let id = rows[0].i_id; let (tx, rx) = sync_channel(1); self.conn.reducers.load_items_then(rows, move |_, res| { log::debug!("Got response from `load_items`: {res:?}"); @@ -156,7 +157,7 @@ impl ModuleClient { Ok(Ok(Ok(()))) => Ok(()), Ok(Ok(Err(message))) => bail!("load_items failed: {}", message), Ok(Err(err)) => Err(anyhow!("load_items internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_items"), + Err(_) => bail!("timed out waiting for load_items {}", id), } } From 84737a6baf9887bc63c8d288640b3667b7f5e990 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 17:27:58 -0400 Subject: [PATCH 29/38] `log_stopwatch` when loading --- modules/tpcc/src/lib.rs | 15 ++++++++++++++- modules/tpcc/src/remote.rs | 11 +++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index eb146e748f1..994c781307a 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,5 +1,7 @@ +use 
remote::clear_remote_warehouses; use spacetimedb::{ - procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, + log_stopwatch::LogStopwatch, procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, + SpacetimeType, Table, Timestamp, }; use std::collections::BTreeSet; @@ -342,11 +344,13 @@ pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { for row in ctx.db.warehouse().iter() { ctx.db.warehouse().delete(row); } + clear_remote_warehouses(ctx); Ok(()) } #[reducer] pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_warehouses"); for row in rows { validate_warehouse_row(&row)?; ctx.db.warehouse().insert(row); @@ -356,6 +360,7 @@ pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), #[reducer] pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_districts"); for row in rows { validate_district_row(&row)?; ctx.db.district().insert(row); @@ -365,6 +370,7 @@ pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> Result<(), S #[reducer] pub fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_customers"); for row in rows { validate_customer_row(&row)?; ctx.db.customer().insert(row); @@ -374,6 +380,7 @@ pub fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), S #[reducer] pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_history"); for mut row in rows { row.history_id = 0; ctx.db.history().insert(row); @@ -383,6 +390,7 @@ pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), Stri #[reducer] pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_items"); for row in rows { validate_item_row(&row)?; ctx.db.item().insert(row); @@ -392,15 +400,18 
@@ pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { #[reducer] pub fn load_stocks(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_stocks"); for row in rows { validate_stock_row(&row)?; ctx.db.stock().insert(row); } + Ok(()) } #[reducer] pub fn load_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_orders"); for row in rows { ctx.db.oorder().insert(row); } @@ -409,6 +420,7 @@ pub fn load_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String #[reducer] pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_new_orders"); for row in rows { ctx.db.new_order_row().insert(row); } @@ -417,6 +429,7 @@ pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), #[reducer] pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_order_lines"); for row in rows { ctx.db.order_line().insert(row); } diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index cc129a025e9..89452c8ce40 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -2,8 +2,8 @@ use std::time::Duration; use http::Request; use spacetimedb::{ - http::Timeout, reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, Table, TimeDuration, - TxContext, + http::Timeout, log_stopwatch::LogStopwatch, reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, + Table, TimeDuration, TxContext, }; use spacetimedb_sats::bsatn; @@ -38,12 +38,19 @@ pub struct RemoteWarehouse { #[reducer] fn load_remote_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_remote_warehouses"); for row in rows { ctx.db.remote_warehouse().try_insert(row)?; } Ok(()) } +pub fn clear_remote_warehouses(ctx: &ReducerContext) { + for row in 
ctx.db.remote_warehouse().iter() { + ctx.db.remote_warehouse().delete(row); + } +} + pub fn remote_warehouse_home(ctx: &ReducerContext, warehouse_id: WarehouseId) -> Option { ctx.db .remote_warehouse() From 79b8c1ac617ec2ac0266e034f359ac288daebf4d Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 17:33:35 -0400 Subject: [PATCH 30/38] Revert "`log_stopwatch` when loading" This reverts commit 84737a6baf9887bc63c8d288640b3667b7f5e990. Was supposed to be on a different branch. --- modules/tpcc/src/lib.rs | 15 +-------------- modules/tpcc/src/remote.rs | 11 ++--------- 2 files changed, 3 insertions(+), 23 deletions(-) diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index 994c781307a..eb146e748f1 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,7 +1,5 @@ -use remote::clear_remote_warehouses; use spacetimedb::{ - log_stopwatch::LogStopwatch, procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, - SpacetimeType, Table, Timestamp, + procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, }; use std::collections::BTreeSet; @@ -344,13 +342,11 @@ pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { for row in ctx.db.warehouse().iter() { ctx.db.warehouse().delete(row); } - clear_remote_warehouses(ctx); Ok(()) } #[reducer] pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_warehouses"); for row in rows { validate_warehouse_row(&row)?; ctx.db.warehouse().insert(row); @@ -360,7 +356,6 @@ pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), #[reducer] pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_districts"); for row in rows { validate_district_row(&row)?; ctx.db.district().insert(row); @@ -370,7 +365,6 @@ pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> Result<(), S #[reducer] pub 
fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_customers"); for row in rows { validate_customer_row(&row)?; ctx.db.customer().insert(row); @@ -380,7 +374,6 @@ pub fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), S #[reducer] pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_history"); for mut row in rows { row.history_id = 0; ctx.db.history().insert(row); @@ -390,7 +383,6 @@ pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), Stri #[reducer] pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_items"); for row in rows { validate_item_row(&row)?; ctx.db.item().insert(row); @@ -400,18 +392,15 @@ pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { #[reducer] pub fn load_stocks(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_stocks"); for row in rows { validate_stock_row(&row)?; ctx.db.stock().insert(row); } - Ok(()) } #[reducer] pub fn load_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_orders"); for row in rows { ctx.db.oorder().insert(row); } @@ -420,7 +409,6 @@ pub fn load_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String #[reducer] pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_new_orders"); for row in rows { ctx.db.new_order_row().insert(row); } @@ -429,7 +417,6 @@ pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), #[reducer] pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_order_lines"); for row in rows { ctx.db.order_line().insert(row); } diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index 89452c8ce40..cc129a025e9 100644 
--- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -2,8 +2,8 @@ use std::time::Duration; use http::Request; use spacetimedb::{ - http::Timeout, log_stopwatch::LogStopwatch, reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, - Table, TimeDuration, TxContext, + http::Timeout, reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, Table, TimeDuration, + TxContext, }; use spacetimedb_sats::bsatn; @@ -38,19 +38,12 @@ pub struct RemoteWarehouse { #[reducer] fn load_remote_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { - let _timer = LogStopwatch::new("load_remote_warehouses"); for row in rows { ctx.db.remote_warehouse().try_insert(row)?; } Ok(()) } -pub fn clear_remote_warehouses(ctx: &ReducerContext) { - for row in ctx.db.remote_warehouse().iter() { - ctx.db.remote_warehouse().delete(row); - } -} - pub fn remote_warehouse_home(ctx: &ReducerContext, warehouse_id: WarehouseId) -> Option { ctx.db .remote_warehouse() From a0eef62dbd3991b363f92434e5beea9fc3f46d98 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 17:36:54 -0400 Subject: [PATCH 31/38] `log_stopwatch` when loading --- modules/tpcc/src/lib.rs | 14 +++++++++++++- modules/tpcc/src/remote.rs | 6 ++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index eb146e748f1..9fa6c683204 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,5 +1,7 @@ +use remote::reset_remote_warehouses; use spacetimedb::{ - procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, + log_stopwatch::LogStopwatch, procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, + SpacetimeType, Table, Timestamp, }; use std::collections::BTreeSet; @@ -342,11 +344,13 @@ pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { for row in ctx.db.warehouse().iter() { ctx.db.warehouse().delete(row); } + 
reset_remote_warehouses(ctx); Ok(()) } #[reducer] pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_warehouses"); for row in rows { validate_warehouse_row(&row)?; ctx.db.warehouse().insert(row); @@ -356,6 +360,7 @@ pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), #[reducer] pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_districts"); for row in rows { validate_district_row(&row)?; ctx.db.district().insert(row); @@ -365,6 +370,7 @@ pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> Result<(), S #[reducer] pub fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_customers"); for row in rows { validate_customer_row(&row)?; ctx.db.customer().insert(row); @@ -374,6 +380,7 @@ pub fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), S #[reducer] pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_history"); for mut row in rows { row.history_id = 0; ctx.db.history().insert(row); @@ -383,6 +390,7 @@ pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), Stri #[reducer] pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_items"); for row in rows { validate_item_row(&row)?; ctx.db.item().insert(row); @@ -392,6 +400,7 @@ pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { #[reducer] pub fn load_stocks(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_stocks"); for row in rows { validate_stock_row(&row)?; ctx.db.stock().insert(row); @@ -401,6 +410,7 @@ pub fn load_stocks(ctx: &ReducerContext, rows: Vec) -> Result<(), String> #[reducer] pub fn load_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = 
LogStopwatch::new("load_orders"); for row in rows { ctx.db.oorder().insert(row); } @@ -409,6 +419,7 @@ pub fn load_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String #[reducer] pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_new_orders"); for row in rows { ctx.db.new_order_row().insert(row); } @@ -417,6 +428,7 @@ pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), #[reducer] pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + let _timer = LogStopwatch::new("load_order_lines"); for row in rows { ctx.db.order_line().insert(row); } diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index cc129a025e9..bea5a610852 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -44,6 +44,12 @@ fn load_remote_warehouses(ctx: &ReducerContext, rows: Vec) -> R Ok(()) } +pub fn reset_remote_warehouses(ctx: &ReducerContext) { + for row in ctx.db.remote_warehouse().iter() { + ctx.db.remote_warehouse().delete(row); + } +} + pub fn remote_warehouse_home(ctx: &ReducerContext, warehouse_id: WarehouseId) -> Option { ctx.db .remote_warehouse() From 090c0705f5867cfa7080ac9d494e3ab3b3b0f00c Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sat, 28 Mar 2026 18:06:43 -0400 Subject: [PATCH 32/38] Vibecode: pipeline loading of TPC-C seed data --- tools/tpcc-runner/src/client.rs | 310 ++++++++++++++++++++------------ tools/tpcc-runner/src/loader.rs | 88 +++++++-- 2 files changed, 268 insertions(+), 130 deletions(-) diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs index c2ffba7feb3..ef58bc51c6f 100644 --- a/tools/tpcc-runner/src/client.rs +++ b/tools/tpcc-runner/src/client.rs @@ -1,5 +1,6 @@ use anyhow::{anyhow, bail, Context, Result}; use std::sync::mpsc::sync_channel; +use std::sync::{Arc, Condvar, Mutex}; use std::thread::JoinHandle; use std::time::Duration; @@ -76,145 +77,198 @@ impl ModuleClient { 
} } - pub fn load_remote_warehouses(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_remote_warehouses_then(rows, move |_, res| { - log::debug!("Got response from `load_remote_warehouse`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_remote_warehouses failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_remote_warehouses internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_remote_warehouses"), + pub fn queue_load_remote_warehouses( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self + .conn + .reducers + .load_remote_warehouses_then(rows, move |_, res| { + handle_reducer_result("load_remote_warehouses", res, &errors); + decrement_pending(&pending_for_callback); + }) + { + decrement_pending(pending); + return Err(anyhow!("load_remote_warehouses send error: {err}")); } + Ok(()) } - pub fn load_warehouses(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_warehouses_then(rows, move |_, res| { - log::debug!("Got response from `load_warehouses`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_warehouses failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_warehouses internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_warehouses"), + pub fn queue_load_warehouses( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_warehouses_then(rows, move |_, res| { + 
handle_reducer_result("load_warehouses", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_warehouses send error: {err}")); } + Ok(()) } - pub fn load_districts(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_districts_then(rows, move |_, res| { - log::debug!("Got response from `load_districts`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_districts failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_districts internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_districts"), + pub fn queue_load_districts( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_districts_then(rows, move |_, res| { + handle_reducer_result("load_districts", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_districts send error: {err}")); } + Ok(()) } - pub fn load_customers(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_customers_then(rows, move |_, res| { - log::debug!("Got response from `load_customers`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_customers failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_customers internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_customers"), + pub fn queue_load_customers( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let 
errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_customers_then(rows, move |_, res| { + handle_reducer_result("load_customers", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_customers send error: {err}")); } + Ok(()) } - pub fn load_history(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_history_then(rows, move |_, res| { - log::debug!("Got response from `load_history`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_history failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_history internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_history"), + pub fn queue_load_history( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_history_then(rows, move |_, res| { + handle_reducer_result("load_history", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_history send error: {err}")); } + Ok(()) } - pub fn load_items(&self, rows: Vec) -> Result<()> { - let id = rows[0].i_id; - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_items_then(rows, move |_, res| { - log::debug!("Got response from `load_items`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_items failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_items internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_items {}", id), + pub fn queue_load_items( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> 
{ + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_items_then(rows, move |_, res| { + handle_reducer_result("load_items", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_items send error: {err}")); } + Ok(()) } - pub fn load_stocks(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_stocks_then(rows, move |_, res| { - log::debug!("Got response from `load_stocks`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_stocks failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_stocks internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_stocks"), + pub fn queue_load_stocks( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_stocks_then(rows, move |_, res| { + handle_reducer_result("load_stocks", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_stocks send error: {err}")); } + Ok(()) } - pub fn load_orders(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_orders_then(rows, move |_, res| { - log::debug!("Got response from `load_orders`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_orders failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_orders internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_orders"), + pub fn queue_load_orders( + &self, + rows: Vec, + pending: &Arc<(Mutex, 
Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_orders_then(rows, move |_, res| { + handle_reducer_result("load_orders", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_orders send error: {err}")); } + Ok(()) } - pub fn load_new_orders(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_new_orders_then(rows, move |_, res| { - log::debug!("Got response from `load_new_orders`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_new_orders failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_new_orders internal error: {}", err)), - Err(_) => bail!("timed out waiting for load_new_orders"), + pub fn queue_load_new_orders( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_new_orders_then(rows, move |_, res| { + handle_reducer_result("load_new_orders", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_new_orders send error: {err}")); } + Ok(()) } - pub fn load_order_lines(&self, rows: Vec) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn.reducers.load_order_lines_then(rows, move |_, res| { - log::debug!("Got response from `load_order_lines`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("load_order_lines failed: {}", message), - Ok(Err(err)) => Err(anyhow!("load_order_lines internal error: {}", err)), - Err(_) => 
bail!("timed out waiting for load_order_lines"), + pub fn queue_load_order_lines( + &self, + rows: Vec, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, + ) -> Result<()> { + increment_pending(pending); + let pending_for_callback = Arc::clone(pending); + let errors = Arc::clone(errors); + if let Err(err) = self.conn.reducers.load_order_lines_then(rows, move |_, res| { + handle_reducer_result("load_order_lines", res, &errors); + decrement_pending(&pending_for_callback); + }) { + decrement_pending(pending); + return Err(anyhow!("load_order_lines send error: {err}")); } + Ok(()) } pub fn new_order( @@ -372,6 +426,40 @@ impl ModuleClient { } } +fn increment_pending(pending: &Arc<(Mutex, Condvar)>) { + let (lock, _) = &**pending; + let mut guard = lock.lock().unwrap(); + *guard += 1; +} + +fn decrement_pending(pending: &Arc<(Mutex, Condvar)>) { + let (lock, cvar) = &**pending; + let mut guard = lock.lock().unwrap(); + *guard = guard.saturating_sub(1); + if *guard == 0 { + cvar.notify_all(); + } +} + +fn handle_reducer_result( + name: &'static str, + res: Result, spacetimedb_sdk::__codegen::InternalError>, + errors: &Arc>>, +) { + let maybe_error = match res { + Ok(Ok(())) => None, + Ok(Err(message)) => Some(anyhow!("{name} failed: {message}")), + Err(err) => Some(anyhow!("{name} internal error: {err}")), + }; + + if let Some(err) = maybe_error { + let mut guard = errors.lock().unwrap(); + if guard.is_none() { + *guard = Some(err); + } + } +} + pub fn expect_ok(operation: &str, result: Result>) -> Result { match result? 
{ Ok(value) => Ok(value), diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index 058e223d46d..19b3ab27175 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -1,6 +1,7 @@ use anyhow::{Context, Result}; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use std::ops::Range; +use std::sync::{Arc, Condvar, Mutex}; use std::thread; use std::time::SystemTime; @@ -88,6 +89,9 @@ fn configure_one_database(config: &LoadConfig, database_number: u16, topology: & let load_c_last = rng.random_range(0..=255); let base_ts = Timestamp::from(SystemTime::now()); + let pending = Arc::new((Mutex::new(0_u64), Condvar::new())); + let errors = Arc::new(Mutex::new(None)); + load_remote_warehouses( &client, database_number, @@ -95,8 +99,10 @@ fn configure_one_database(config: &LoadConfig, database_number: u16, topology: & config.warehouses_per_database, config.batch_size, topology, + &pending, + &errors, )?; - load_items(&client, config.batch_size, &mut rng)?; + load_items(&client, config.batch_size, &mut rng, &pending, &errors)?; load_warehouses_and_districts( &client, database_number, @@ -104,6 +110,8 @@ fn configure_one_database(config: &LoadConfig, database_number: u16, topology: & config.batch_size, base_ts, &mut rng, + &pending, + &errors, )?; load_stock( &client, @@ -111,6 +119,8 @@ fn configure_one_database(config: &LoadConfig, database_number: u16, topology: & config.warehouses_per_database, config.batch_size, &mut rng, + &pending, + &errors, )?; load_customers_history_orders( &client, @@ -120,15 +130,26 @@ fn configure_one_database(config: &LoadConfig, database_number: u16, topology: & base_ts, load_c_last, &mut rng, + &pending, + &errors, )?; + wait_for_pending(&pending); + take_first_error(&errors)?; + client.shutdown(); log::info!("tpcc load for database {database} finished"); Ok(()) } -fn load_items(client: &ModuleClient, batch_size: usize, rng: &mut StdRng) -> Result<()> { +fn load_items( + client: 
&ModuleClient, + batch_size: usize, + rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, +) -> Result<()> { let mut batch = Vec::with_capacity(batch_size); for item_id in 1..=ITEMS { batch.push(Item { @@ -139,11 +160,11 @@ fn load_items(client: &ModuleClient, batch_size: usize, rng: &mut StdRng) -> Res i_data: maybe_with_original(rng, 26, 50), }); if batch.len() >= batch_size { - client.load_items(std::mem::take(&mut batch))?; + client.queue_load_items(std::mem::take(&mut batch), &pending, &errors)?; } } if !batch.is_empty() { - client.load_items(batch)?; + client.queue_load_items(batch, &pending, &errors)?; } Ok(()) } @@ -161,6 +182,8 @@ fn load_remote_warehouses( warehouses_per_database: u16, batch_size: usize, topology: &DatabaseTopology, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, ) -> Result<()> { let mut warehouse_batch = Vec::with_capacity(batch_size); @@ -182,7 +205,7 @@ fn load_remote_warehouses( let split_at = warehouse_batch.len().min(batch_size); let remainder = warehouse_batch.split_off(split_at); let rows = std::mem::replace(&mut warehouse_batch, remainder); - client.load_remote_warehouses(rows)?; + client.queue_load_remote_warehouses(rows, &pending, &errors)?; } Ok(()) @@ -195,6 +218,8 @@ fn load_warehouses_and_districts( batch_size: usize, timestamp: Timestamp, rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, ) -> Result<()> { let mut warehouse_batch = Vec::with_capacity(batch_size); let mut district_batch = Vec::with_capacity(batch_size); @@ -234,13 +259,13 @@ fn load_warehouses_and_districts( let split_at = warehouse_batch.len().min(batch_size); let remainder = warehouse_batch.split_off(split_at); let rows = std::mem::replace(&mut warehouse_batch, remainder); - client.load_warehouses(rows)?; + client.queue_load_warehouses(rows, &pending, &errors)?; } while !district_batch.is_empty() { let split_at = district_batch.len().min(batch_size); let remainder = district_batch.split_off(split_at); let rows 
= std::mem::replace(&mut district_batch, remainder); - client.load_districts(rows)?; + client.queue_load_districts(rows, &pending, &errors)?; } let _ = timestamp; Ok(()) @@ -252,6 +277,8 @@ fn load_stock( warehouses_per_database: u16, batch_size: usize, rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, ) -> Result<()> { let mut batch = Vec::with_capacity(batch_size); for w_id in warehouses_range(database_number, warehouses_per_database) { @@ -277,12 +304,12 @@ fn load_stock( s_data: maybe_with_original(rng, 26, 50), }); if batch.len() >= batch_size { - client.load_stocks(std::mem::take(&mut batch))?; + client.queue_load_stocks(std::mem::take(&mut batch), &pending, &errors)?; } } } if !batch.is_empty() { - client.load_stocks(batch)?; + client.queue_load_stocks(batch, &pending, &errors)?; } Ok(()) } @@ -295,6 +322,8 @@ fn load_customers_history_orders( timestamp: Timestamp, load_c_last: u32, rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, ) -> Result<()> { let mut customer_batch = Vec::with_capacity(batch_size); let mut history_batch = Vec::with_capacity(batch_size); @@ -351,10 +380,10 @@ fn load_customers_history_orders( }); if customer_batch.len() >= batch_size { - client.load_customers(std::mem::take(&mut customer_batch))?; + client.queue_load_customers(std::mem::take(&mut customer_batch), &pending, &errors)?; } if history_batch.len() >= batch_size { - client.load_history(std::mem::take(&mut history_batch))?; + client.queue_load_history(std::mem::take(&mut history_batch), &pending, &errors)?; } } @@ -401,35 +430,56 @@ fn load_customers_history_orders( ol_dist_info: alpha_string(rng, 24, 24), }); if order_line_batch.len() >= batch_size { - client.load_order_lines(std::mem::take(&mut order_line_batch))?; + client.queue_load_order_lines( + std::mem::take(&mut order_line_batch), + &pending, + &errors, + )?; } } if order_batch.len() >= batch_size { - client.load_orders(std::mem::take(&mut order_batch))?; + 
client.queue_load_orders(std::mem::take(&mut order_batch), &pending, &errors)?; } if new_order_batch.len() >= batch_size { - client.load_new_orders(std::mem::take(&mut new_order_batch))?; + client.queue_load_new_orders(std::mem::take(&mut new_order_batch), &pending, &errors)?; } } } } if !customer_batch.is_empty() { - client.load_customers(customer_batch)?; + client.queue_load_customers(customer_batch, &pending, &errors)?; } if !history_batch.is_empty() { - client.load_history(history_batch)?; + client.queue_load_history(history_batch, &pending, &errors)?; } if !order_batch.is_empty() { - client.load_orders(order_batch)?; + client.queue_load_orders(order_batch, &pending, &errors)?; } if !new_order_batch.is_empty() { - client.load_new_orders(new_order_batch)?; + client.queue_load_new_orders(new_order_batch, &pending, &errors)?; } if !order_line_batch.is_empty() { - client.load_order_lines(order_line_batch)?; + client.queue_load_order_lines(order_line_batch, &pending, &errors)?; } Ok(()) } + +fn wait_for_pending(pending: &Arc<(Mutex, Condvar)>) { + let (lock, cvar) = pending.as_ref(); + let mut guard = lock.lock().unwrap(); + while *guard > 0 { + guard = cvar.wait(guard).unwrap(); + } +} + +fn take_first_error(errors: &Arc>>) -> Result<()> { + let mut guard = errors.lock().unwrap(); + if let Some(err) = guard.take() { + Err(err) + } else { + Ok(()) + } +} From 474ce4550141412ebf9533427c646bedf5a4a3c7 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Sat, 28 Mar 2026 15:28:50 -0700 Subject: [PATCH 33/38] server-side datagen --- modules/tpcc/Cargo.toml | 2 +- modules/tpcc/src/lib.rs | 10 +- modules/tpcc/src/load.rs | 926 ++++++++++++++++++ modules/tpcc/src/remote.rs | 17 +- tools/tpcc-runner/README.md | 36 +- tools/tpcc-runner/src/client.rs | 103 +- tools/tpcc-runner/src/config.rs | 27 +- tools/tpcc-runner/src/coordinator.rs | 4 +- tools/tpcc-runner/src/legacy_loader.rs | 484 +++++++++ tools/tpcc-runner/src/lib.rs | 2 + tools/tpcc-runner/src/loader.rs | 480 
++------- .../configure_tpcc_load_reducer.rs | 68 ++ tools/tpcc-runner/src/module_bindings/mod.rs | 67 +- .../restart_tpcc_load_reducer.rs | 61 ++ .../resume_tpcc_load_reducer.rs | 61 ++ .../start_tpcc_load_reducer.rs | 61 ++ .../tpcc_load_config_request_type.rs | 23 + .../module_bindings/tpcc_load_config_type.rs | 76 ++ .../src/module_bindings/tpcc_load_job_type.rs | 69 ++ .../module_bindings/tpcc_load_phase_type.rs | 24 + .../module_bindings/tpcc_load_state_table.rs | 161 +++ .../module_bindings/tpcc_load_state_type.rs | 88 ++ .../module_bindings/tpcc_load_status_type.rs | 22 + 23 files changed, 2446 insertions(+), 426 deletions(-) create mode 100644 modules/tpcc/src/load.rs create mode 100644 tools/tpcc-runner/src/legacy_loader.rs create mode 100644 tools/tpcc-runner/src/module_bindings/configure_tpcc_load_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/restart_tpcc_load_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/resume_tpcc_load_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/start_tpcc_load_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/tpcc_load_config_request_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/tpcc_load_config_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/tpcc_load_job_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/tpcc_load_phase_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/tpcc_load_state_table.rs create mode 100644 tools/tpcc-runner/src/module_bindings/tpcc_load_state_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/tpcc_load_status_type.rs diff --git a/modules/tpcc/Cargo.toml b/modules/tpcc/Cargo.toml index a82e91cb971..190f0559509 100644 --- a/modules/tpcc/Cargo.toml +++ b/modules/tpcc/Cargo.toml @@ -9,7 +9,7 @@ crate-type = ["cdylib"] [dependencies] anyhow.workspace = true log.workspace = true -spacetimedb = { workspace = true, features = ["unstable"] } +spacetimedb 
= { workspace = true, features = ["unstable", "rand"] } spacetimedb-sats = { workspace = true, features = ["serde"] } http.workspace = true serde_json.workspace = true diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index afef01c8bcb..87a1d193757 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,4 +1,3 @@ -use remote::reset_remote_warehouses; use spacetimedb::{ log_stopwatch::LogStopwatch, procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, @@ -13,6 +12,7 @@ macro_rules! ensure { }; } +mod load; mod new_order; mod payment; mod remote; @@ -311,6 +311,12 @@ pub struct DeliveryCompletion { #[reducer] pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { + clear_tpcc_business_tables(ctx); + load::clear_load_metadata(ctx); + Ok(()) +} + +pub(crate) fn clear_tpcc_business_tables(ctx: &ReducerContext) { for row in ctx.db.delivery_job().iter() { ctx.db.delivery_job().delete(row); } @@ -344,8 +350,6 @@ pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { for row in ctx.db.warehouse().iter() { ctx.db.warehouse().delete(row); } - reset_remote_warehouses(ctx); - Ok(()) } #[reducer] diff --git a/modules/tpcc/src/load.rs b/modules/tpcc/src/load.rs new file mode 100644 index 00000000000..51933bbca8d --- /dev/null +++ b/modules/tpcc/src/load.rs @@ -0,0 +1,926 @@ +use spacetimedb::rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use spacetimedb::{reducer, table, Identity, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp}; + +use crate::{ + customer, district, history, item, + new_order::pack_order_line_key, + new_order_row, oorder, order_line, + remote::{ + clear_remote_warehouses, clear_spacetimedb_uri, replace_remote_warehouses, replace_spacetimedb_uri, + RemoteWarehouse, + }, + stock, warehouse, Customer, District, History, Item, NewOrder, OOrder, OrderLine, Stock, Warehouse, WarehouseId, + CUSTOMERS_PER_DISTRICT, DISTRICTS_PER_WAREHOUSE, ITEMS, 
+}; + +const LOAD_SINGLETON_ID: u8 = 1; +const WAREHOUSE_YTD_CENTS: i64 = 30_000_000; +const DISTRICT_YTD_CENTS: i64 = 3_000_000; +const CUSTOMER_CREDIT_LIMIT_CENTS: i64 = 5_000_000; +const CUSTOMER_INITIAL_BALANCE_CENTS: i64 = -1_000; +const CUSTOMER_INITIAL_YTD_PAYMENT_CENTS: i64 = 1_000; +const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; +const NEW_ORDER_START: u32 = 2_101; + +const TAG_ITEM: u64 = 0x1000; +const TAG_WAREHOUSE: u64 = 0x2000; +const TAG_DISTRICT: u64 = 0x3000; +const TAG_STOCK: u64 = 0x4000; +const TAG_CUSTOMER: u64 = 0x5000; +const TAG_HISTORY: u64 = 0x6000; +const TAG_ORDER_PERMUTATION: u64 = 0x7000; +const TAG_ORDER: u64 = 0x8000; +const TAG_ORDER_LINE: u64 = 0x9000; + +#[derive(Clone, Copy, Debug, Eq, PartialEq, SpacetimeType)] +pub enum TpccLoadStatus { + Idle, + Running, + Failed, + Complete, +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, SpacetimeType)] +pub enum TpccLoadPhase { + Items, + WarehousesDistricts, + Stock, + CustomersHistory, + Orders, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct TpccLoadConfigRequest { + pub database_number: u16, + pub num_databases: u16, + pub warehouses_per_database: u16, + pub batch_size: u32, + pub seed: u64, + pub load_c_last: u32, + pub base_ts: Timestamp, + pub spacetimedb_uri: String, + pub database_identities: Vec, +} + +#[table(accessor = tpcc_load_config)] +#[derive(Clone, Debug)] +pub struct TpccLoadConfig { + #[primary_key] + pub singleton_id: u8, + pub database_number: u16, + pub num_databases: u16, + pub warehouses_per_database: u16, + pub batch_size: u32, + pub seed: u64, + pub load_c_last: u32, + pub base_ts: Timestamp, + pub spacetimedb_uri: String, + pub database_identities: Vec, +} + +#[table(accessor = tpcc_load_state, public)] +#[derive(Clone, Debug)] +pub struct TpccLoadState { + #[primary_key] + pub singleton_id: u8, + pub status: TpccLoadStatus, + pub phase: TpccLoadPhase, + pub next_warehouse_id: WarehouseId, + pub next_district_id: u8, + pub next_item_id: u32, + pub 
next_order_id: u32, + pub chunks_completed: u64, + pub rows_inserted: u64, + pub last_error: Option, + pub started_at: Option, + pub updated_at: Timestamp, + pub completed_at: Option, +} + +#[table(accessor = tpcc_load_job, scheduled(run_tpcc_load_chunk))] +#[derive(Clone, Debug)] +pub struct TpccLoadJob { + #[primary_key] + #[auto_inc] + pub scheduled_id: u64, + pub scheduled_at: ScheduleAt, + pub phase: TpccLoadPhase, + pub next_warehouse_id: WarehouseId, + pub next_district_id: u8, + pub next_item_id: u32, + pub next_order_id: u32, +} + +#[reducer] +pub fn configure_tpcc_load(ctx: &ReducerContext, request: TpccLoadConfigRequest) -> Result<(), String> { + if ctx.db.tpcc_load_job().iter().next().is_some() { + return Err("tpcc load is already running".into()); + } + configure_tpcc_load_internal(ctx, request) +} + +#[reducer] +pub fn start_tpcc_load(ctx: &ReducerContext) -> Result<(), String> { + if ctx.db.tpcc_load_job().iter().next().is_some() { + return Err("tpcc load is already running".into()); + } + + let mut state = current_state(ctx)?; + if state.status == TpccLoadStatus::Complete { + return Err("tpcc load has already completed; use restart_tpcc_load to run again".into()); + } + state.status = TpccLoadStatus::Running; + state.last_error = None; + state.started_at = Some(ctx.timestamp); + state.updated_at = ctx.timestamp; + state.completed_at = None; + replace_state(ctx, state.clone()); + insert_job(ctx, job_from_state(&state, ctx.timestamp)); + Ok(()) +} + +#[reducer] +pub fn resume_tpcc_load(ctx: &ReducerContext) -> Result<(), String> { + if ctx.db.tpcc_load_job().iter().next().is_some() { + return Err("tpcc load is already running".into()); + } + + let mut state = current_state(ctx)?; + if state.status == TpccLoadStatus::Complete { + return Err("tpcc load has already completed".into()); + } + state.status = TpccLoadStatus::Running; + state.last_error = None; + state.updated_at = ctx.timestamp; + replace_state(ctx, state.clone()); + insert_job(ctx, 
job_from_state(&state, ctx.timestamp)); + Ok(()) +} + +#[reducer] +pub fn restart_tpcc_load(ctx: &ReducerContext) -> Result<(), String> { + let request = config_as_request(¤t_config(ctx)?); + crate::clear_tpcc_business_tables(ctx); + configure_tpcc_load_internal(ctx, request)?; + start_tpcc_load(ctx) +} + +#[reducer] +pub fn run_tpcc_load_chunk(ctx: &ReducerContext, job: TpccLoadJob) -> Result<(), String> { + let config = current_config(ctx)?; + let state = current_state(ctx)?; + if state.status != TpccLoadStatus::Running { + fail_load(ctx, state, "tpcc load state is not running".into()); + return Ok(()); + } + + let result = run_chunk(ctx, &config, &job); + match result { + Ok(advance) => { + let mut next_state = state; + next_state.phase = advance.phase; + next_state.next_warehouse_id = advance.next_warehouse_id; + next_state.next_district_id = advance.next_district_id; + next_state.next_item_id = advance.next_item_id; + next_state.next_order_id = advance.next_order_id; + next_state.chunks_completed = next_state.chunks_completed.saturating_add(1); + next_state.rows_inserted = next_state.rows_inserted.saturating_add(advance.rows_inserted); + next_state.updated_at = ctx.timestamp; + + if advance.complete { + next_state.status = TpccLoadStatus::Complete; + next_state.completed_at = Some(ctx.timestamp); + replace_state(ctx, next_state); + } else { + replace_state(ctx, next_state.clone()); + insert_job(ctx, job_from_state(&next_state, ctx.timestamp)); + } + } + Err(error) => fail_load(ctx, state, error), + } + + Ok(()) +} + +pub(crate) fn clear_load_metadata(ctx: &ReducerContext) { + for row in ctx.db.tpcc_load_job().iter() { + ctx.db.tpcc_load_job().delete(row); + } + for row in ctx.db.tpcc_load_state().iter() { + ctx.db.tpcc_load_state().delete(row); + } + for row in ctx.db.tpcc_load_config().iter() { + ctx.db.tpcc_load_config().delete(row); + } + clear_remote_warehouses(ctx); + clear_spacetimedb_uri(ctx); +} + +fn configure_tpcc_load_internal(ctx: &ReducerContext, 
request: TpccLoadConfigRequest) -> Result<(), String> { + validate_request(&request)?; + clear_load_metadata(ctx); + + replace_spacetimedb_uri(ctx, request.spacetimedb_uri.clone()); + replace_remote_warehouses(ctx, build_remote_warehouses(&request))?; + let state = initial_state(&request, ctx.timestamp); + + ctx.db.tpcc_load_config().insert(TpccLoadConfig { + singleton_id: LOAD_SINGLETON_ID, + database_number: request.database_number, + num_databases: request.num_databases, + warehouses_per_database: request.warehouses_per_database, + batch_size: request.batch_size, + seed: request.seed, + load_c_last: request.load_c_last, + base_ts: request.base_ts, + spacetimedb_uri: request.spacetimedb_uri, + database_identities: request.database_identities, + }); + + replace_state(ctx, state); + Ok(()) +} + +fn validate_request(request: &TpccLoadConfigRequest) -> Result<(), String> { + if request.num_databases == 0 { + return Err("num_databases must be positive".into()); + } + if request.warehouses_per_database == 0 { + return Err("warehouses_per_database must be positive".into()); + } + if request.batch_size == 0 { + return Err("batch_size must be positive".into()); + } + if usize::from(request.num_databases) != request.database_identities.len() { + return Err("database_identities length must match num_databases".into()); + } + if request.database_number >= request.num_databases { + return Err("database_number must be less than num_databases".into()); + } + Ok(()) +} + +fn initial_state(request: &TpccLoadConfigRequest, now: Timestamp) -> TpccLoadState { + TpccLoadState { + singleton_id: LOAD_SINGLETON_ID, + status: TpccLoadStatus::Idle, + phase: TpccLoadPhase::Items, + next_warehouse_id: warehouse_start(request.database_number, request.warehouses_per_database), + next_district_id: 1, + next_item_id: 1, + next_order_id: 1, + chunks_completed: 0, + rows_inserted: 0, + last_error: None, + started_at: None, + updated_at: now, + completed_at: None, + } +} + +fn 
config_as_request(config: &TpccLoadConfig) -> TpccLoadConfigRequest { + TpccLoadConfigRequest { + database_number: config.database_number, + num_databases: config.num_databases, + warehouses_per_database: config.warehouses_per_database, + batch_size: config.batch_size, + seed: config.seed, + load_c_last: config.load_c_last, + base_ts: config.base_ts, + spacetimedb_uri: config.spacetimedb_uri.clone(), + database_identities: config.database_identities.clone(), + } +} + +fn build_remote_warehouses(request: &TpccLoadConfigRequest) -> Vec { + let mut rows = Vec::new(); + for other_database_number in 0..request.num_databases { + if other_database_number == request.database_number { + continue; + } + let database_ident = request.database_identities[usize::from(other_database_number)]; + for w_id in warehouse_range(other_database_number, request.warehouses_per_database) { + rows.push(RemoteWarehouse { + w_id, + remote_database_home: database_ident, + }); + } + } + rows +} + +fn current_config(ctx: &ReducerContext) -> Result { + ctx.db + .tpcc_load_config() + .singleton_id() + .find(LOAD_SINGLETON_ID) + .ok_or_else(|| "tpcc load has not been configured".to_string()) +} + +fn current_state(ctx: &ReducerContext) -> Result { + ctx.db + .tpcc_load_state() + .singleton_id() + .find(LOAD_SINGLETON_ID) + .ok_or_else(|| "tpcc load state row is missing".to_string()) +} + +fn replace_state(ctx: &ReducerContext, state: TpccLoadState) { + ctx.db.tpcc_load_state().singleton_id().delete(LOAD_SINGLETON_ID); + ctx.db.tpcc_load_state().insert(state); +} + +fn insert_job(ctx: &ReducerContext, job: TpccLoadJob) { + ctx.db.tpcc_load_job().insert(job); +} + +fn fail_load(ctx: &ReducerContext, mut state: TpccLoadState, error: String) { + state.status = TpccLoadStatus::Failed; + state.last_error = Some(error); + state.updated_at = ctx.timestamp; + replace_state(ctx, state); +} + +fn job_from_state(state: &TpccLoadState, now: Timestamp) -> TpccLoadJob { + TpccLoadJob { + scheduled_id: 0, + 
scheduled_at: now.into(), + phase: state.phase, + next_warehouse_id: state.next_warehouse_id, + next_district_id: state.next_district_id, + next_item_id: state.next_item_id, + next_order_id: state.next_order_id, + } +} + +struct ChunkAdvance { + phase: TpccLoadPhase, + next_warehouse_id: WarehouseId, + next_district_id: u8, + next_item_id: u32, + next_order_id: u32, + rows_inserted: u64, + complete: bool, +} + +fn run_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { + match job.phase { + TpccLoadPhase::Items => load_item_chunk(ctx, config, job), + TpccLoadPhase::WarehousesDistricts => load_warehouse_district_chunk(ctx, config, job), + TpccLoadPhase::Stock => load_stock_chunk(ctx, config, job), + TpccLoadPhase::CustomersHistory => load_customer_history_chunk(ctx, config, job), + TpccLoadPhase::Orders => load_order_chunk(ctx, config, job), + } +} + +fn load_item_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { + if job.next_item_id == 0 || job.next_item_id > ITEMS { + return Err(format!("invalid item cursor {}", job.next_item_id)); + } + let chunk_end = (job.next_item_id + config.batch_size - 1).min(ITEMS); + for item_id in job.next_item_id..=chunk_end { + ctx.db.item().insert(generate_item(config, item_id)); + } + + let next_phase = if chunk_end == ITEMS { + TpccLoadPhase::WarehousesDistricts + } else { + TpccLoadPhase::Items + }; + Ok(ChunkAdvance { + phase: next_phase, + next_warehouse_id: warehouse_start(config.database_number, config.warehouses_per_database), + next_district_id: 1, + next_item_id: if chunk_end == ITEMS { 1 } else { chunk_end + 1 }, + next_order_id: 1, + rows_inserted: u64::from(chunk_end - job.next_item_id + 1), + complete: false, + }) +} + +fn load_warehouse_district_chunk( + ctx: &ReducerContext, + config: &TpccLoadConfig, + job: &TpccLoadJob, +) -> Result { + let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); + if 
job.next_warehouse_id < warehouse_start(config.database_number, config.warehouses_per_database) + || job.next_warehouse_id > end_warehouse + { + return Err(format!("invalid warehouse cursor {}", job.next_warehouse_id)); + } + + ctx.db + .warehouse() + .insert(generate_warehouse(config, job.next_warehouse_id)); + for d_id in 1..=DISTRICTS_PER_WAREHOUSE { + ctx.db + .district() + .insert(generate_district(config, job.next_warehouse_id, d_id)); + } + + Ok(ChunkAdvance { + phase: if job.next_warehouse_id == end_warehouse { + TpccLoadPhase::Stock + } else { + TpccLoadPhase::WarehousesDistricts + }, + next_warehouse_id: if job.next_warehouse_id == end_warehouse { + warehouse_start(config.database_number, config.warehouses_per_database) + } else { + job.next_warehouse_id + 1 + }, + next_district_id: 1, + next_item_id: 1, + next_order_id: 1, + rows_inserted: 1 + u64::from(DISTRICTS_PER_WAREHOUSE), + complete: false, + }) +} + +fn load_stock_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { + let start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); + let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); + if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { + return Err(format!("invalid stock warehouse cursor {}", job.next_warehouse_id)); + } + if job.next_item_id == 0 || job.next_item_id > ITEMS { + return Err(format!("invalid stock item cursor {}", job.next_item_id)); + } + + let chunk_end = (job.next_item_id + config.batch_size - 1).min(ITEMS); + for item_id in job.next_item_id..=chunk_end { + ctx.db + .stock() + .insert(generate_stock(config, job.next_warehouse_id, item_id)); + } + + let (phase, next_warehouse_id, next_item_id, next_district_id) = if chunk_end < ITEMS { + (TpccLoadPhase::Stock, job.next_warehouse_id, chunk_end + 1, 1) + } else if job.next_warehouse_id < end_warehouse { + (TpccLoadPhase::Stock, 
job.next_warehouse_id + 1, 1, 1) + } else { + (TpccLoadPhase::CustomersHistory, start_warehouse, 1, 1) + }; + + Ok(ChunkAdvance { + phase, + next_warehouse_id, + next_district_id, + next_item_id, + next_order_id: 1, + rows_inserted: u64::from(chunk_end - job.next_item_id + 1), + complete: false, + }) +} + +fn load_customer_history_chunk( + ctx: &ReducerContext, + config: &TpccLoadConfig, + job: &TpccLoadJob, +) -> Result { + let start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); + let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); + if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { + return Err(format!("invalid customer warehouse cursor {}", job.next_warehouse_id)); + } + if !(1..=DISTRICTS_PER_WAREHOUSE).contains(&job.next_district_id) { + return Err(format!("invalid customer district cursor {}", job.next_district_id)); + } + + for c_id in 1..=CUSTOMERS_PER_DISTRICT { + ctx.db.customer().insert(generate_customer( + config, + job.next_warehouse_id, + job.next_district_id, + c_id, + )); + ctx.db.history().insert(generate_history( + config, + job.next_warehouse_id, + job.next_district_id, + c_id, + )); + } + + let (phase, next_warehouse_id, next_district_id, next_order_id) = advance_district( + job.next_warehouse_id, + job.next_district_id, + start_warehouse, + end_warehouse, + TpccLoadPhase::CustomersHistory, + ); + let (phase, next_warehouse_id, next_district_id, next_order_id) = if phase == TpccLoadPhase::CustomersHistory { + (phase, next_warehouse_id, next_district_id, next_order_id) + } else { + (TpccLoadPhase::Orders, start_warehouse, 1, 1) + }; + + Ok(ChunkAdvance { + phase, + next_warehouse_id, + next_district_id, + next_item_id: 1, + next_order_id, + rows_inserted: u64::from(CUSTOMERS_PER_DISTRICT) * 2, + complete: false, + }) +} + +fn load_order_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { + let 
start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); + let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); + if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { + return Err(format!("invalid order warehouse cursor {}", job.next_warehouse_id)); + } + if !(1..=DISTRICTS_PER_WAREHOUSE).contains(&job.next_district_id) { + return Err(format!("invalid order district cursor {}", job.next_district_id)); + } + if job.next_order_id == 0 || job.next_order_id > CUSTOMERS_PER_DISTRICT { + return Err(format!("invalid order cursor {}", job.next_order_id)); + } + + let chunk_end = (job.next_order_id + config.batch_size - 1).min(CUSTOMERS_PER_DISTRICT); + let permutation = customer_permutation(config, job.next_warehouse_id, job.next_district_id); + let mut rows_inserted = 0u64; + + for o_id in job.next_order_id..=chunk_end { + let customer_id = permutation[(o_id - 1) as usize]; + let mut order_rng = deterministic_rng( + config.seed, + TAG_ORDER, + &[ + u64::from(job.next_warehouse_id), + u64::from(job.next_district_id), + u64::from(o_id), + ], + ); + let delivered = o_id < NEW_ORDER_START; + let order_line_count = order_rng.gen_range(5..=15) as u8; + ctx.db.oorder().insert(OOrder { + order_key: crate::pack_order_key(job.next_warehouse_id, job.next_district_id, o_id), + o_w_id: job.next_warehouse_id, + o_d_id: job.next_district_id, + o_id, + o_c_id: customer_id, + o_entry_d: config.base_ts, + o_carrier_id: if delivered { + Some(order_rng.gen_range(1..=10)) + } else { + None + }, + o_ol_cnt: order_line_count, + o_all_local: true, + }); + rows_inserted += 1; + + if !delivered { + ctx.db.new_order_row().insert(NewOrder { + new_order_key: crate::pack_order_key(job.next_warehouse_id, job.next_district_id, o_id), + no_w_id: job.next_warehouse_id, + no_d_id: job.next_district_id, + no_o_id: o_id, + }); + rows_inserted += 1; + } + + for ol_number in 1..=order_line_count { + let mut 
line_rng = deterministic_rng( + config.seed, + TAG_ORDER_LINE, + &[ + u64::from(job.next_warehouse_id), + u64::from(job.next_district_id), + u64::from(o_id), + u64::from(ol_number), + ], + ); + ctx.db.order_line().insert(OrderLine { + order_line_key: pack_order_line_key(job.next_warehouse_id, job.next_district_id, o_id, ol_number), + ol_w_id: job.next_warehouse_id, + ol_d_id: job.next_district_id, + ol_o_id: o_id, + ol_number, + ol_i_id: line_rng.gen_range(1..=ITEMS), + ol_supply_w_id: job.next_warehouse_id, + ol_delivery_d: if delivered { Some(config.base_ts) } else { None }, + ol_quantity: 5, + ol_amount_cents: if delivered { 0 } else { line_rng.gen_range(1..=999_999) }, + ol_dist_info: alpha_string(&mut line_rng, 24, 24), + }); + rows_inserted += 1; + } + } + + if chunk_end < CUSTOMERS_PER_DISTRICT { + return Ok(ChunkAdvance { + phase: TpccLoadPhase::Orders, + next_warehouse_id: job.next_warehouse_id, + next_district_id: job.next_district_id, + next_item_id: 1, + next_order_id: chunk_end + 1, + rows_inserted, + complete: false, + }); + } + + let complete = is_last_order_district(job.next_warehouse_id, job.next_district_id, end_warehouse); + if complete { + return Ok(ChunkAdvance { + phase: TpccLoadPhase::Orders, + next_warehouse_id: end_warehouse, + next_district_id: DISTRICTS_PER_WAREHOUSE, + next_item_id: 1, + next_order_id: CUSTOMERS_PER_DISTRICT, + rows_inserted, + complete: true, + }); + } + + let (_, next_warehouse_id, next_district_id, next_order_id) = advance_district( + job.next_warehouse_id, + job.next_district_id, + start_warehouse, + end_warehouse, + TpccLoadPhase::Orders, + ); + + Ok(ChunkAdvance { + phase: TpccLoadPhase::Orders, + next_warehouse_id, + next_district_id, + next_item_id: 1, + next_order_id, + rows_inserted, + complete: false, + }) +} + +fn advance_district( + warehouse_id: WarehouseId, + district_id: u8, + start_warehouse: WarehouseId, + end_warehouse: WarehouseId, + phase: TpccLoadPhase, +) -> (TpccLoadPhase, WarehouseId, u8, u32) { 
+ if district_id < DISTRICTS_PER_WAREHOUSE { + return (phase, warehouse_id, district_id + 1, 1); + } + if warehouse_id < end_warehouse { + return (phase, warehouse_id + 1, 1, 1); + } + (TpccLoadPhase::Orders, start_warehouse, 1, 1) +} + +fn is_last_order_district(warehouse_id: WarehouseId, district_id: u8, end_warehouse: WarehouseId) -> bool { + warehouse_id == end_warehouse && district_id == DISTRICTS_PER_WAREHOUSE +} + +fn generate_item(config: &TpccLoadConfig, item_id: u32) -> Item { + let mut rng = deterministic_rng(config.seed, TAG_ITEM, &[u64::from(item_id)]); + Item { + i_id: item_id, + i_im_id: rng.gen_range(1..=10_000), + i_name: alpha_numeric_string(&mut rng, 14, 24), + i_price_cents: rng.gen_range(100..=10_000), + i_data: maybe_with_original(&mut rng, 26, 50), + } +} + +fn generate_warehouse(config: &TpccLoadConfig, warehouse_id: WarehouseId) -> Warehouse { + let mut rng = deterministic_rng(config.seed, TAG_WAREHOUSE, &[u64::from(warehouse_id)]); + Warehouse { + w_id: warehouse_id, + w_name: alpha_string(&mut rng, 6, 10), + w_street_1: alpha_numeric_string(&mut rng, 10, 20), + w_street_2: alpha_numeric_string(&mut rng, 10, 20), + w_city: alpha_string(&mut rng, 10, 20), + w_state: alpha_string(&mut rng, 2, 2), + w_zip: zip_code(&mut rng), + w_tax_bps: rng.gen_range(0..=2_000), + w_ytd_cents: WAREHOUSE_YTD_CENTS, + } +} + +fn generate_district(config: &TpccLoadConfig, warehouse_id: WarehouseId, district_id: u8) -> District { + let mut rng = deterministic_rng( + config.seed, + TAG_DISTRICT, + &[u64::from(warehouse_id), u64::from(district_id)], + ); + District { + district_key: crate::pack_district_key(warehouse_id, district_id), + d_w_id: warehouse_id, + d_id: district_id, + d_name: alpha_string(&mut rng, 6, 10), + d_street_1: alpha_numeric_string(&mut rng, 10, 20), + d_street_2: alpha_numeric_string(&mut rng, 10, 20), + d_city: alpha_string(&mut rng, 10, 20), + d_state: alpha_string(&mut rng, 2, 2), + d_zip: zip_code(&mut rng), + d_tax_bps: 
rng.gen_range(0..=2_000), + d_ytd_cents: DISTRICT_YTD_CENTS, + d_next_o_id: CUSTOMERS_PER_DISTRICT + 1, + } +} + +fn generate_stock(config: &TpccLoadConfig, warehouse_id: WarehouseId, item_id: u32) -> Stock { + let mut rng = deterministic_rng(config.seed, TAG_STOCK, &[u64::from(warehouse_id), u64::from(item_id)]); + Stock { + stock_key: crate::pack_stock_key(warehouse_id, item_id), + s_w_id: warehouse_id, + s_i_id: item_id, + s_quantity: rng.gen_range(10..=100), + s_dist_01: alpha_string(&mut rng, 24, 24), + s_dist_02: alpha_string(&mut rng, 24, 24), + s_dist_03: alpha_string(&mut rng, 24, 24), + s_dist_04: alpha_string(&mut rng, 24, 24), + s_dist_05: alpha_string(&mut rng, 24, 24), + s_dist_06: alpha_string(&mut rng, 24, 24), + s_dist_07: alpha_string(&mut rng, 24, 24), + s_dist_08: alpha_string(&mut rng, 24, 24), + s_dist_09: alpha_string(&mut rng, 24, 24), + s_dist_10: alpha_string(&mut rng, 24, 24), + s_ytd: 0, + s_order_cnt: 0, + s_remote_cnt: 0, + s_data: maybe_with_original(&mut rng, 26, 50), + } +} + +fn generate_customer( + config: &TpccLoadConfig, + warehouse_id: WarehouseId, + district_id: u8, + customer_id: u32, +) -> Customer { + let mut rng = deterministic_rng( + config.seed, + TAG_CUSTOMER, + &[u64::from(warehouse_id), u64::from(district_id), u64::from(customer_id)], + ); + let credit = if rng.gen_bool(0.10) { "BC" } else { "GC" }; + let last_name = if customer_id <= 1_000 { + make_last_name(customer_id - 1) + } else { + make_last_name(nurand(&mut rng, 255, 0, 999, config.load_c_last)) + }; + Customer { + customer_key: crate::pack_customer_key(warehouse_id, district_id, customer_id), + c_w_id: warehouse_id, + c_d_id: district_id, + c_id: customer_id, + c_first: alpha_string(&mut rng, 8, 16), + c_middle: "OE".to_string(), + c_last: last_name, + c_street_1: alpha_numeric_string(&mut rng, 10, 20), + c_street_2: alpha_numeric_string(&mut rng, 10, 20), + c_city: alpha_string(&mut rng, 10, 20), + c_state: alpha_string(&mut rng, 2, 2), + c_zip: 
zip_code(&mut rng), + c_phone: numeric_string(&mut rng, 16, 16), + c_since: config.base_ts, + c_credit: credit.to_string(), + c_credit_lim_cents: CUSTOMER_CREDIT_LIMIT_CENTS, + c_discount_bps: rng.gen_range(0..=5_000), + c_balance_cents: CUSTOMER_INITIAL_BALANCE_CENTS, + c_ytd_payment_cents: CUSTOMER_INITIAL_YTD_PAYMENT_CENTS, + c_payment_cnt: 1, + c_delivery_cnt: 0, + c_data: alpha_numeric_string(&mut rng, 300, 500), + } +} + +fn generate_history(config: &TpccLoadConfig, warehouse_id: WarehouseId, district_id: u8, customer_id: u32) -> History { + let mut rng = deterministic_rng( + config.seed, + TAG_HISTORY, + &[u64::from(warehouse_id), u64::from(district_id), u64::from(customer_id)], + ); + History { + history_id: 0, + h_c_id: customer_id, + h_c_d_id: district_id, + h_c_w_id: warehouse_id, + h_d_id: district_id, + h_w_id: warehouse_id, + h_date: config.base_ts, + h_amount_cents: HISTORY_INITIAL_AMOUNT_CENTS, + h_data: alpha_numeric_string(&mut rng, 12, 24), + } +} + +fn customer_permutation(config: &TpccLoadConfig, warehouse_id: WarehouseId, district_id: u8) -> Vec { + let mut permutation: Vec = (1..=CUSTOMERS_PER_DISTRICT).collect(); + let mut rng = deterministic_rng( + config.seed, + TAG_ORDER_PERMUTATION, + &[u64::from(warehouse_id), u64::from(district_id)], + ); + permutation.shuffle(&mut rng); + permutation +} + +fn warehouse_range(database_number: u16, warehouses_per_database: u16) -> std::ops::Range { + let start = warehouse_start(database_number, warehouses_per_database); + let end = start + warehouses_per_database; + start..end +} + +fn warehouse_start(database_number: u16, warehouses_per_database: u16) -> WarehouseId { + database_number * warehouses_per_database + 1 +} + +fn warehouse_end(database_number: u16, warehouses_per_database: u16) -> WarehouseId { + warehouse_start(database_number, warehouses_per_database) + warehouses_per_database - 1 +} + +fn deterministic_rng(seed: u64, tag: u64, parts: &[u64]) -> StdRng { + 
StdRng::seed_from_u64(mix_seed(seed, tag, parts)) +} + +fn mix_seed(seed: u64, tag: u64, parts: &[u64]) -> u64 { + let mut value = splitmix64(seed ^ tag); + for part in parts { + value = splitmix64(value ^ *part); + } + value +} + +fn splitmix64(mut value: u64) -> u64 { + value = value.wrapping_add(0x9E37_79B9_7F4A_7C15); + let mut z = value; + z = (z ^ (z >> 30)).wrapping_mul(0xBF58_476D_1CE4_E5B9); + z = (z ^ (z >> 27)).wrapping_mul(0x94D0_49BB_1331_11EB); + z ^ (z >> 31) +} + +fn nurand(rng: &mut StdRng, a: u32, x: u32, y: u32, c: u32) -> u32 { + (((rng.gen_range(0..=a) | rng.gen_range(x..=y)) + c) % (y - x + 1)) + x +} + +fn alpha_string(rng: &mut StdRng, min_len: usize, max_len: usize) -> String { + let len = rng.gen_range(min_len..=max_len); + (0..len).map(|_| (b'A' + rng.gen_range(0..26)) as char).collect() +} + +fn numeric_string(rng: &mut StdRng, min_len: usize, max_len: usize) -> String { + let len = rng.gen_range(min_len..=max_len); + (0..len).map(|_| (b'0' + rng.gen_range(0..10)) as char).collect() +} + +fn alpha_numeric_string(rng: &mut StdRng, min_len: usize, max_len: usize) -> String { + let len = rng.gen_range(min_len..=max_len); + (0..len) + .map(|_| { + if rng.gen_bool(0.5) { + (b'A' + rng.gen_range(0..26)) as char + } else { + (b'0' + rng.gen_range(0..10)) as char + } + }) + .collect() +} + +fn zip_code(rng: &mut StdRng) -> String { + format!("{}11111", numeric_string(rng, 4, 4)) +} + +fn maybe_with_original(rng: &mut StdRng, min_len: usize, max_len: usize) -> String { + let mut data = alpha_numeric_string(rng, min_len, max_len); + if rng.gen_bool(0.10) && data.len() >= 8 { + let start = rng.gen_range(0..=(data.len() - 8)); + data.replace_range(start..start + 8, "ORIGINAL"); + } + data +} + +fn make_last_name(num: u32) -> String { + const PARTS: [&str; 10] = [ + "BAR", "OUGHT", "ABLE", "PRI", "PRES", "ESE", "ANTI", "CALLY", "ATION", "EING", + ]; + let hundreds = ((num / 100) % 10) as usize; + let tens = ((num / 10) % 10) as usize; + let ones = 
(num % 10) as usize; + format!("{}{}{}", PARTS[hundreds], PARTS[tens], PARTS[ones]) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn detects_last_order_district_for_completion() { + assert!(is_last_order_district(2, DISTRICTS_PER_WAREHOUSE, 2)); + assert!(!is_last_order_district(1, DISTRICTS_PER_WAREHOUSE, 2)); + assert!(!is_last_order_district(2, DISTRICTS_PER_WAREHOUSE - 1, 2)); + } + + #[test] + fn advance_district_wraps_back_to_start_after_last_district() { + assert_eq!( + advance_district(2, DISTRICTS_PER_WAREHOUSE, 1, 2, TpccLoadPhase::Orders), + (TpccLoadPhase::Orders, 1, 1, 1) + ); + } +} diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index bea5a610852..cb4a895f5a1 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -16,10 +16,18 @@ struct SpacetimeDbUri { #[reducer] fn set_spacetimedb_uri(ctx: &ReducerContext, uri: String) { + replace_spacetimedb_uri(ctx, uri); +} + +pub fn replace_spacetimedb_uri(ctx: &ReducerContext, uri: String) { + clear_spacetimedb_uri(ctx); + ctx.db.spacetimedb_uri().insert(SpacetimeDbUri { uri }); +} + +pub fn clear_spacetimedb_uri(ctx: &ReducerContext) { for row in ctx.db.spacetimedb_uri().iter() { ctx.db.spacetimedb_uri().delete(row); } - ctx.db.spacetimedb_uri().insert(SpacetimeDbUri { uri }); } pub fn get_spacetimedb_uri(tx: &TxContext) -> String { @@ -38,13 +46,18 @@ pub struct RemoteWarehouse { #[reducer] fn load_remote_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + replace_remote_warehouses(ctx, rows) +} + +pub fn replace_remote_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + clear_remote_warehouses(ctx); for row in rows { ctx.db.remote_warehouse().try_insert(row)?; } Ok(()) } -pub fn reset_remote_warehouses(ctx: &ReducerContext) { +pub fn clear_remote_warehouses(ctx: &ReducerContext) { for row in ctx.db.remote_warehouse().iter() { ctx.db.remote_warehouse().delete(row); } diff --git a/tools/tpcc-runner/README.md 
b/tools/tpcc-runner/README.md index 9700f8024f2..ab00e8a8ad4 100644 --- a/tools/tpcc-runner/README.md +++ b/tools/tpcc-runner/README.md @@ -2,9 +2,10 @@ `tpcc-runner` is the Rust-side harness for the SpacetimeDB TPC-C module in `modules/tpcc`. -It supports three subcommands: +It supports four subcommands: -- `load`: populate the module with the initial TPC-C dataset +- `load`: configure and start server-side generation of the initial TPC-C dataset +- `load-client`: use the legacy client-push loader that sends row batches over the SDK connection - `driver`: run one benchmark driver with one logical terminal per SDK connection - `coordinator`: synchronize multiple remote drivers and aggregate their summaries @@ -84,7 +85,11 @@ cargo run -p spacetimedb-cli -- publish \ tpcc-1 ``` -4. Load data. For one warehouse in one database: +4. Load data. The loader configures each database once, starts a server-side +chunked load, and waits for the module's public load-state row to reach +`Complete`. + +For one warehouse in one database: ```bash cargo run --release -p tpcc-runner -- load \ @@ -106,9 +111,10 @@ cargo run --release -p tpcc-runner -- load \ --reset true ``` -To load databases in parallel, add `--load-parallelism `. The loader runs -databases concurrently but still loads each individual database in the normal -table order. If you omit the flag, it defaults to `min(num_databases, 8)`. +To load databases in parallel, add `--load-parallelism `. The loader starts +that many databases concurrently and each database then loads itself +server-side in chunks. If you omit the flag, it defaults to +`min(num_databases, 8)`. For example, to load those two local databases in parallel: @@ -122,6 +128,24 @@ cargo run --release -p tpcc-runner -- load \ --reset true ``` +`--batch-size` still matters for `load`, but it now controls the server-side +chunk size for phases like items, stock, and orders instead of the number of +rows pushed over the websocket by the client. 
+ +If you need the old behavior for comparison or debugging, `load-client` keeps +the previous client-side row-push path and uses the same `--num-databases`, +`--warehouses-per-database`, `--load-parallelism`, `--batch-size`, and +`--reset` flags: + +```bash +cargo run --release -p tpcc-runner -- load-client \ + --uri http://127.0.0.1:3000 \ + --database-prefix tpcc \ + --num-databases 1 \ + --warehouses-per-database 1 \ + --reset true +``` + 5. Run a single local driver against one warehouse: ```bash diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs index ef58bc51c6f..b205ebbbe1f 100644 --- a/tools/tpcc-runner/src/client.rs +++ b/tools/tpcc-runner/src/client.rs @@ -6,12 +6,14 @@ use std::time::Duration; use crate::config::ConnectionConfig; use crate::module_bindings::*; -use spacetimedb_sdk::{DbContext, Identity}; +use spacetimedb_sdk::{DbContext, Identity, Table as _}; pub struct ModuleClient { conn: DbConnection, thread: Option>, timeout: Duration, + disconnect_error: Arc>>, + load_state_subscription: Option, } impl ModuleClient { @@ -19,6 +21,8 @@ impl ModuleClient { let (ready_tx, ready_rx) = sync_channel(1); let success_tx = ready_tx.clone(); let error_tx = ready_tx; + let disconnect_error = Arc::new(Mutex::new(None)); + let disconnect_error_callback = Arc::clone(&disconnect_error); let mut builder = DbConnection::builder() .with_uri(config.uri.clone()) .with_database_name(database_identity.to_string()) @@ -28,6 +32,13 @@ impl ModuleClient { }) .on_connect_error(move |_, error| { let _ = error_tx.send(Err(anyhow!("connection failed: {error}"))); + }) + .on_disconnect(move |_, error| { + let message = match error { + Some(error) => format!("connection closed: {error}"), + None => "connection closed".to_string(), + }; + *disconnect_error_callback.lock().expect("disconnect mutex poisoned") = Some(message); }); if let Some(token) = &config.token { @@ -44,9 +55,53 @@ impl ModuleClient { conn, thread: Some(thread), timeout: 
Duration::from_secs(config.timeout_secs), + disconnect_error, + load_state_subscription: None, }) } + pub fn subscribe_load_state(&mut self) -> Result<()> { + if self.load_state_subscription.is_some() { + return Ok(()); + } + + let (tx, rx) = sync_channel(1); + let success_tx = tx.clone(); + let handle = self + .conn + .subscription_builder() + .on_applied(move |_| { + let _ = success_tx.send(Ok::<(), anyhow::Error>(())); + }) + .on_error(move |_, error| { + let _ = tx.send(Err(anyhow!("load state subscription failed: {error}"))); + }) + .subscribe(["SELECT * FROM tpcc_load_state"]); + + match rx.recv_timeout(self.timeout) { + Ok(Ok(())) => { + self.load_state_subscription = Some(handle); + Ok(()) + } + Ok(Err(err)) => Err(err), + Err(_) => { + self.ensure_connected()?; + bail!("timed out waiting for load state subscription") + } + } + } + + pub fn load_state(&self) -> Option { + self.conn.db.tpcc_load_state().iter().next() + } + + pub fn ensure_connected(&self) -> Result<()> { + if let Some(message) = self.disconnect_error.lock().expect("disconnect mutex poisoned").clone() { + bail!("{message}"); + } + Ok(()) + } + pub fn set_spacetimedb_uri(&self, uri: &str) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn @@ -86,14 +141,10 @@ impl ModuleClient { increment_pending(pending); let pending_for_callback = Arc::clone(pending); let errors = Arc::clone(errors); - if let Err(err) = self - .conn - .reducers - .load_remote_warehouses_then(rows, move |_, res| { - handle_reducer_result("load_remote_warehouses", res, &errors); - decrement_pending(&pending_for_callback); - }) - { + if let Err(err) = self.conn.reducers.load_remote_warehouses_then(rows, move |_, res| { + handle_reducer_result("load_remote_warehouses", res, &errors); + decrement_pending(&pending_for_callback); + }) { decrement_pending(pending); return Err(anyhow!("load_remote_warehouses send error: {err}")); } @@ -271,6 +322,40 @@ impl ModuleClient { Ok(()) } + pub fn configure_tpcc_load(&self, request: 
TpccLoadConfigRequest) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.configure_tpcc_load_then(request, move |_, res| { + log::debug!("Got response from `configure_tpcc_load`: {res:?}"); + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("configure_tpcc_load failed: {}", message), + Ok(Err(err)) => Err(anyhow!("configure_tpcc_load internal error: {}", err)), + Err(_) => { + self.ensure_connected()?; + bail!("timed out waiting for configure_tpcc_load") + } + } + } + + pub fn start_tpcc_load(&self) -> Result<()> { + let (tx, rx) = sync_channel(1); + self.conn.reducers.start_tpcc_load_then(move |_, res| { + log::debug!("Got response from `start_tpcc_load`: {res:?}"); + let _ = tx.send(res); + })?; + match rx.recv_timeout(self.timeout) { + Ok(Ok(Ok(()))) => Ok(()), + Ok(Ok(Err(message))) => bail!("start_tpcc_load failed: {}", message), + Ok(Err(err)) => Err(anyhow!("start_tpcc_load internal error: {}", err)), + Err(_) => { + self.ensure_connected()?; + bail!("timed out waiting for start_tpcc_load") + } + } + } + pub fn new_order( &self, w_id: u16, diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs index 843231bd8c5..5cf5f61b83a 100644 --- a/tools/tpcc-runner/src/config.rs +++ b/tools/tpcc-runner/src/config.rs @@ -19,6 +19,7 @@ pub struct Cli { #[derive(Debug, Subcommand)] pub enum Command { Load(LoadArgs), + LoadClient(LoadArgs), Driver(DriverArgs), Coordinator(CoordinatorArgs), } @@ -264,12 +265,16 @@ impl LoadArgs { if load_parallelism == 0 { bail!("load_parallelism must be positive"); } + let batch_size = self.batch_size.or(file.load.batch_size).unwrap_or(500); + if batch_size == 0 { + bail!("batch_size must be positive"); + } Ok(LoadConfig { connection: self.connection.resolve(&file.connection), num_databases, warehouses_per_database, load_parallelism: load_parallelism.min(usize::from(num_databases)), - batch_size: 
self.batch_size.or(file.load.batch_size).unwrap_or(500), + batch_size, reset: self.reset.or(file.load.reset).unwrap_or(true), }) } @@ -421,3 +426,23 @@ impl DriverConfig { u32::from(self.driver_warehouse_count) * u32::from(crate::tpcc::DISTRICTS_PER_WAREHOUSE) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn load_args_reject_zero_batch_size() { + let args = LoadArgs { + connection: ConnectionArgs::default(), + num_databases: Some(1), + warehouses_per_database: Some(1), + load_parallelism: Some(1), + batch_size: Some(0), + reset: Some(true), + }; + + let err = args.resolve(&FileConfig::default()).unwrap_err().to_string(); + assert!(err.contains("batch_size must be positive"), "{err}"); + } +} diff --git a/tools/tpcc-runner/src/coordinator.rs b/tools/tpcc-runner/src/coordinator.rs index fdfd6473d95..7d17ae61a35 100644 --- a/tools/tpcc-runner/src/coordinator.rs +++ b/tools/tpcc-runner/src/coordinator.rs @@ -78,7 +78,9 @@ async fn register_driver( inner.registration_order.push(request.driver_id.clone()); inner.registrations.insert( request.driver_id.clone(), - DriverRegistration { assignment: assignment.clone() }, + DriverRegistration { + assignment: assignment.clone(), + }, ); assignment } diff --git a/tools/tpcc-runner/src/legacy_loader.rs b/tools/tpcc-runner/src/legacy_loader.rs new file mode 100644 index 00000000000..6265e387196 --- /dev/null +++ b/tools/tpcc-runner/src/legacy_loader.rs @@ -0,0 +1,484 @@ +use anyhow::{Context, Result}; +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use std::ops::Range; +use std::sync::{Arc, Condvar, Mutex}; +use std::thread; +use std::time::SystemTime; + +use crate::client::ModuleClient; +use crate::config::LoadConfig; +use crate::module_bindings::*; +use crate::topology::DatabaseTopology; +use crate::tpcc::*; +use spacetimedb_sdk::Timestamp; + +const WAREHOUSE_YTD_CENTS: i64 = 30_000_000; +const DISTRICT_YTD_CENTS: i64 = 3_000_000; +const CUSTOMER_CREDIT_LIMIT_CENTS: i64 = 5_000_000; +const 
CUSTOMER_INITIAL_BALANCE_CENTS: i64 = -1_000; +const CUSTOMER_INITIAL_YTD_PAYMENT_CENTS: i64 = 1_000; +const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; + +pub async fn run(config: LoadConfig) -> Result<()> { + log::info!( + "Loading tpcc dataset into {} databases on {} with parallelism {}", + config.num_databases, + config.connection.uri, + config.load_parallelism + ); + + let topology = DatabaseTopology::for_load(&config).await?; + let chunks = database_number_chunks(config.num_databases, config.load_parallelism); + let mut handles = Vec::with_capacity(chunks.len()); + + for (worker_idx, chunk) in chunks.into_iter().enumerate() { + let config = config.clone(); + let topology = topology.clone(); + let thread_name = format!("tpcc-load-{worker_idx}"); + let handle = thread::Builder::new() + .name(thread_name.clone()) + .spawn(move || -> Result<()> { + for database_number in chunk { + configure_one_database(&config, database_number, &topology)?; + } + Ok(()) + }) + .with_context(|| format!("failed to spawn {thread_name}"))?; + handles.push(handle); + } + + for handle in handles { + match handle.join() { + Ok(Ok(())) => {} + Ok(Err(err)) => return Err(err), + Err(_) => anyhow::bail!("loader worker thread panicked"), + } + } + + log::info!("tpcc load finished"); + + Ok(()) +} + +fn database_number_chunks(num_databases: u16, parallelism: usize) -> Vec> { + let database_numbers: Vec = (0..num_databases).collect(); + let chunk_size = database_numbers.len().div_ceil(parallelism); + database_numbers + .chunks(chunk_size) + .map(|chunk| chunk.to_vec()) + .collect() +} + +fn configure_one_database(config: &LoadConfig, database_number: u16, topology: &DatabaseTopology) -> Result<()> { + let database = topology.identity_for_database_number(database_number)?; + log::info!( + "loading tpcc dataset into {} / {} with {} warehouse(s)", + config.connection.uri, + database, + config.warehouses_per_database + ); + + let client = ModuleClient::connect(&config.connection, database)?; + if 
config.reset { + client.reset_tpcc().context("failed to reset tpcc data")?; + } + + client.set_spacetimedb_uri(&config.connection.uri)?; + + let mut rng = StdRng::seed_from_u64(0x5eed_5eed); + let load_c_last = rng.random_range(0..=255); + let base_ts = Timestamp::from(SystemTime::now()); + + let pending = Arc::new((Mutex::new(0_u64), Condvar::new())); + let errors = Arc::new(Mutex::new(None)); + + load_remote_warehouses( + &client, + database_number, + config.num_databases, + config.warehouses_per_database, + config.batch_size, + topology, + &pending, + &errors, + )?; + load_items(&client, config.batch_size, &mut rng, &pending, &errors)?; + load_warehouses_and_districts( + &client, + database_number, + config.warehouses_per_database, + config.batch_size, + base_ts, + &mut rng, + &pending, + &errors, + )?; + load_stock( + &client, + database_number, + config.warehouses_per_database, + config.batch_size, + &mut rng, + &pending, + &errors, + )?; + load_customers_history_orders( + &client, + database_number, + config.warehouses_per_database, + config.batch_size, + base_ts, + load_c_last, + &mut rng, + &pending, + &errors, + )?; + + wait_for_pending(&pending); + take_first_error(&errors)?; + + client.shutdown(); + log::info!("tpcc load for database {database} finished"); + + Ok(()) +} + +fn load_items( + client: &ModuleClient, + batch_size: usize, + rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, +) -> Result<()> { + let mut batch = Vec::with_capacity(batch_size); + for item_id in 1..=ITEMS { + batch.push(Item { + i_id: item_id, + i_im_id: rng.random_range(1..=10_000), + i_name: alpha_numeric_string(rng, 14, 24), + i_price_cents: rng.random_range(100..=10_000), + i_data: maybe_with_original(rng, 26, 50), + }); + if batch.len() >= batch_size { + client.queue_load_items(std::mem::take(&mut batch), pending, errors)?; + } + } + if !batch.is_empty() { + client.queue_load_items(batch, pending, errors)?; + } + Ok(()) +} + +fn 
warehouses_range(database_number: u16, warehouses_per_database: u16) -> Range { + let start_warehouse_number = database_number * warehouses_per_database + 1; + let end_warehouse_number = start_warehouse_number + warehouses_per_database; + start_warehouse_number..end_warehouse_number +} + +#[allow(clippy::too_many_arguments)] +fn load_remote_warehouses( + client: &ModuleClient, + database_number: u16, + num_databases: u16, + warehouses_per_database: u16, + batch_size: usize, + topology: &DatabaseTopology, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, +) -> Result<()> { + let mut warehouse_batch = Vec::with_capacity(batch_size); + + for other_database_number in 0..num_databases { + if other_database_number == database_number { + continue; + } + let other_database_ident = topology.identity_for_database_number(other_database_number)?; + + for w_id in warehouses_range(other_database_number, warehouses_per_database) { + warehouse_batch.push(RemoteWarehouse { + w_id, + remote_database_home: other_database_ident, + }); + } + } + + while !warehouse_batch.is_empty() { + let split_at = warehouse_batch.len().min(batch_size); + let remainder = warehouse_batch.split_off(split_at); + let rows = std::mem::replace(&mut warehouse_batch, remainder); + client.queue_load_remote_warehouses(rows, pending, errors)?; + } + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +fn load_warehouses_and_districts( + client: &ModuleClient, + database_number: u16, + warehouses_per_database: u16, + batch_size: usize, + timestamp: Timestamp, + rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, +) -> Result<()> { + let mut warehouse_batch = Vec::with_capacity(batch_size); + let mut district_batch = Vec::with_capacity(batch_size); + + for w_id in warehouses_range(database_number, warehouses_per_database) { + warehouse_batch.push(Warehouse { + w_id, + w_name: alpha_string(rng, 6, 10), + w_street_1: alpha_numeric_string(rng, 10, 20), + w_street_2: alpha_numeric_string(rng, 10, 
20), + w_city: alpha_string(rng, 10, 20), + w_state: alpha_string(rng, 2, 2), + w_zip: zip_code(rng), + w_tax_bps: rng.random_range(0..=2_000), + w_ytd_cents: WAREHOUSE_YTD_CENTS, + }); + + for d_id in 1..=DISTRICTS_PER_WAREHOUSE { + district_batch.push(District { + district_key: pack_district_key(w_id, d_id), + d_w_id: w_id, + d_id, + d_name: alpha_string(rng, 6, 10), + d_street_1: alpha_numeric_string(rng, 10, 20), + d_street_2: alpha_numeric_string(rng, 10, 20), + d_city: alpha_string(rng, 10, 20), + d_state: alpha_string(rng, 2, 2), + d_zip: zip_code(rng), + d_tax_bps: rng.random_range(0..=2_000), + d_ytd_cents: DISTRICT_YTD_CENTS, + d_next_o_id: CUSTOMERS_PER_DISTRICT + 1, + }); + } + } + + while !warehouse_batch.is_empty() { + let split_at = warehouse_batch.len().min(batch_size); + let remainder = warehouse_batch.split_off(split_at); + let rows = std::mem::replace(&mut warehouse_batch, remainder); + client.queue_load_warehouses(rows, pending, errors)?; + } + while !district_batch.is_empty() { + let split_at = district_batch.len().min(batch_size); + let remainder = district_batch.split_off(split_at); + let rows = std::mem::replace(&mut district_batch, remainder); + client.queue_load_districts(rows, pending, errors)?; + } + let _ = timestamp; + Ok(()) +} + +fn load_stock( + client: &ModuleClient, + database_number: u16, + warehouses_per_database: u16, + batch_size: usize, + rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, +) -> Result<()> { + let mut batch = Vec::with_capacity(batch_size); + for w_id in warehouses_range(database_number, warehouses_per_database) { + for item_id in 1..=ITEMS { + batch.push(Stock { + stock_key: pack_stock_key(w_id, item_id), + s_w_id: w_id, + s_i_id: item_id, + s_quantity: rng.random_range(10..=100), + s_dist_01: alpha_string(rng, 24, 24), + s_dist_02: alpha_string(rng, 24, 24), + s_dist_03: alpha_string(rng, 24, 24), + s_dist_04: alpha_string(rng, 24, 24), + s_dist_05: alpha_string(rng, 24, 24), + s_dist_06: 
alpha_string(rng, 24, 24), + s_dist_07: alpha_string(rng, 24, 24), + s_dist_08: alpha_string(rng, 24, 24), + s_dist_09: alpha_string(rng, 24, 24), + s_dist_10: alpha_string(rng, 24, 24), + s_ytd: 0, + s_order_cnt: 0, + s_remote_cnt: 0, + s_data: maybe_with_original(rng, 26, 50), + }); + if batch.len() >= batch_size { + client.queue_load_stocks(std::mem::take(&mut batch), pending, errors)?; + } + } + } + if !batch.is_empty() { + client.queue_load_stocks(batch, pending, errors)?; + } + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +fn load_customers_history_orders( + client: &ModuleClient, + database_number: u16, + warehouses_per_database: u16, + batch_size: usize, + timestamp: Timestamp, + load_c_last: u32, + rng: &mut StdRng, + pending: &Arc<(Mutex, Condvar)>, + errors: &Arc>>, +) -> Result<()> { + let mut customer_batch = Vec::with_capacity(batch_size); + let mut history_batch = Vec::with_capacity(batch_size); + let mut order_batch = Vec::with_capacity(batch_size); + let mut new_order_batch = Vec::with_capacity(batch_size); + let mut order_line_batch = Vec::with_capacity(batch_size); + + for w_id in warehouses_range(database_number, warehouses_per_database) { + for d_id in 1..=DISTRICTS_PER_WAREHOUSE { + let mut permutation: Vec = (1..=CUSTOMERS_PER_DISTRICT).collect(); + permutation.shuffle(rng); + + for c_id in 1..=CUSTOMERS_PER_DISTRICT { + let credit = if rng.random_bool(0.10) { "BC" } else { "GC" }; + let last_name = if c_id <= 1_000 { + make_last_name(c_id - 1) + } else { + make_last_name(nurand(rng, 255, 0, 999, load_c_last)) + }; + customer_batch.push(Customer { + customer_key: pack_customer_key(w_id, d_id, c_id), + c_w_id: w_id, + c_d_id: d_id, + c_id, + c_first: alpha_string(rng, 8, 16), + c_middle: "OE".to_string(), + c_last: last_name, + c_street_1: alpha_numeric_string(rng, 10, 20), + c_street_2: alpha_numeric_string(rng, 10, 20), + c_city: alpha_string(rng, 10, 20), + c_state: alpha_string(rng, 2, 2), + c_zip: zip_code(rng), + c_phone: 
numeric_string(rng, 16, 16), + c_since: timestamp, + c_credit: credit.to_string(), + c_credit_lim_cents: CUSTOMER_CREDIT_LIMIT_CENTS, + c_discount_bps: rng.random_range(0..=5_000), + c_balance_cents: CUSTOMER_INITIAL_BALANCE_CENTS, + c_ytd_payment_cents: CUSTOMER_INITIAL_YTD_PAYMENT_CENTS, + c_payment_cnt: 1, + c_delivery_cnt: 0, + c_data: alpha_numeric_string(rng, 300, 500), + }); + history_batch.push(History { + history_id: 0, + h_c_id: c_id, + h_c_d_id: d_id, + h_c_w_id: w_id, + h_d_id: d_id, + h_w_id: w_id, + h_date: timestamp, + h_amount_cents: HISTORY_INITIAL_AMOUNT_CENTS, + h_data: alpha_numeric_string(rng, 12, 24), + }); + + if customer_batch.len() >= batch_size { + client.queue_load_customers(std::mem::take(&mut customer_batch), pending, errors)?; + } + if history_batch.len() >= batch_size { + client.queue_load_history(std::mem::take(&mut history_batch), pending, errors)?; + } + } + + for o_id in 1..=CUSTOMERS_PER_DISTRICT { + let customer_id = permutation[(o_id - 1) as usize]; + let delivered = o_id < NEW_ORDER_START; + let order_line_count = rng.random_range(5..=15) as u8; + order_batch.push(OOrder { + order_key: pack_order_key(w_id, d_id, o_id), + o_w_id: w_id, + o_d_id: d_id, + o_id, + o_c_id: customer_id, + o_entry_d: timestamp, + o_carrier_id: if delivered { + Some(rng.random_range(1..=10)) + } else { + None + }, + o_ol_cnt: order_line_count, + o_all_local: true, + }); + if !delivered { + new_order_batch.push(NewOrder { + new_order_key: pack_order_key(w_id, d_id, o_id), + no_w_id: w_id, + no_d_id: d_id, + no_o_id: o_id, + }); + } + + for ol_number in 1..=order_line_count { + order_line_batch.push(OrderLine { + order_line_key: pack_order_line_key(w_id, d_id, o_id, ol_number), + ol_w_id: w_id, + ol_d_id: d_id, + ol_o_id: o_id, + ol_number, + ol_i_id: rng.random_range(1..=ITEMS), + ol_supply_w_id: w_id, + ol_delivery_d: if delivered { Some(timestamp) } else { None }, + ol_quantity: 5, + ol_amount_cents: if delivered { 0 } else { 
rng.random_range(1..=999_999) }, + ol_dist_info: alpha_string(rng, 24, 24), + }); + if order_line_batch.len() >= batch_size { + client.queue_load_order_lines(std::mem::take(&mut order_line_batch), pending, errors)?; + } + } + + if order_batch.len() >= batch_size { + client.queue_load_orders(std::mem::take(&mut order_batch), pending, errors)?; + } + if new_order_batch.len() >= batch_size { + client.queue_load_new_orders(std::mem::take(&mut new_order_batch), pending, errors)?; + } + } + } + } + + if !customer_batch.is_empty() { + client.queue_load_customers(customer_batch, pending, errors)?; + } + if !history_batch.is_empty() { + client.queue_load_history(history_batch, pending, errors)?; + } + if !order_batch.is_empty() { + client.queue_load_orders(order_batch, pending, errors)?; + } + if !new_order_batch.is_empty() { + client.queue_load_new_orders(new_order_batch, pending, errors)?; + } + if !order_line_batch.is_empty() { + client.queue_load_order_lines(order_line_batch, pending, errors)?; + } + + Ok(()) +} + +fn wait_for_pending(pending: &Arc<(Mutex, Condvar)>) { + let (lock, cvar) = pending.as_ref(); + let mut guard = lock.lock().unwrap(); + while *guard > 0 { + guard = cvar.wait(guard).unwrap(); + } +} + +fn take_first_error(errors: &Arc>>) -> Result<()> { + let mut guard = errors.lock().unwrap(); + if let Some(err) = guard.take() { + Err(err) + } else { + Ok(()) + } +} diff --git a/tools/tpcc-runner/src/lib.rs b/tools/tpcc-runner/src/lib.rs index 52d39edc61d..9a42504cc1a 100644 --- a/tools/tpcc-runner/src/lib.rs +++ b/tools/tpcc-runner/src/lib.rs @@ -2,6 +2,7 @@ mod client; pub mod config; pub mod coordinator; pub mod driver; +pub mod legacy_loader; pub mod loader; mod module_bindings; mod protocol; @@ -21,6 +22,7 @@ pub async fn run_cli(cli: Cli) -> anyhow::Result<()> { match cli.command { Command::Load(args) => loader::run(args.resolve(&file_config)?).await, + Command::LoadClient(args) => legacy_loader::run(args.resolve(&file_config)?).await, 
Command::Driver(args) => driver::run(args.resolve(&file_config)?).await, Command::Coordinator(args) => coordinator::run(args.resolve(&file_config)?).await, } diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index 19b3ab27175..c2f6994df7f 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -1,23 +1,15 @@ use anyhow::{Context, Result}; -use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use std::ops::Range; -use std::sync::{Arc, Condvar, Mutex}; +use rand::{rngs::StdRng, Rng, SeedableRng}; use std::thread; -use std::time::SystemTime; +use std::time::{Duration, SystemTime}; use crate::client::ModuleClient; use crate::config::LoadConfig; -use crate::module_bindings::*; +use crate::module_bindings::{TpccLoadConfigRequest, TpccLoadStatus}; use crate::topology::DatabaseTopology; -use crate::tpcc::*; use spacetimedb_sdk::Timestamp; -const WAREHOUSE_YTD_CENTS: i64 = 30_000_000; -const DISTRICT_YTD_CENTS: i64 = 3_000_000; -const CUSTOMER_CREDIT_LIMIT_CENTS: i64 = 5_000_000; -const CUSTOMER_INITIAL_BALANCE_CENTS: i64 = -1_000; -const CUSTOMER_INITIAL_YTD_PAYMENT_CENTS: i64 = 1_000; -const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; +const LOAD_SEED: u64 = 0x5eed_5eed; pub async fn run(config: LoadConfig) -> Result<()> { log::info!( @@ -39,7 +31,7 @@ pub async fn run(config: LoadConfig) -> Result<()> { .name(thread_name.clone()) .spawn(move || -> Result<()> { for database_number in chunk { - configure_one_database(&config, database_number, &topology)?; + run_one_database(&config, database_number, &topology)?; } Ok(()) }) @@ -56,7 +48,6 @@ pub async fn run(config: LoadConfig) -> Result<()> { } log::info!("tpcc load finished"); - Ok(()) } @@ -69,417 +60,108 @@ fn database_number_chunks(num_databases: u16, parallelism: usize) -> Vec Result<()> { - let database = topology.identity_for_database_number(database_number)?; +fn run_one_database(config: &LoadConfig, database_number: u16, topology: 
&DatabaseTopology) -> Result<()> { + let database_identity = topology.identity_for_database_number(database_number)?; log::info!( - "loading tpcc dataset into {} / {} with {} warehouse(s)", + "starting tpcc load into {} / {} with {} warehouse(s)", config.connection.uri, - database, + database_identity, config.warehouses_per_database ); - let client = ModuleClient::connect(&config.connection, database)?; + let mut client = ModuleClient::connect(&config.connection, database_identity)?; + client.subscribe_load_state()?; + if config.reset { client.reset_tpcc().context("failed to reset tpcc data")?; } - client.set_spacetimedb_uri(&config.connection.uri)?; - - let mut rng = StdRng::seed_from_u64(0x5eed_5eed); - let load_c_last = rng.random_range(0..=255); - let base_ts = Timestamp::from(SystemTime::now()); - - let pending = Arc::new((Mutex::new(0_u64), Condvar::new())); - let errors = Arc::new(Mutex::new(None)); - - load_remote_warehouses( - &client, - database_number, - config.num_databases, - config.warehouses_per_database, - config.batch_size, - topology, - &pending, - &errors, - )?; - load_items(&client, config.batch_size, &mut rng, &pending, &errors)?; - load_warehouses_and_districts( - &client, - database_number, - config.warehouses_per_database, - config.batch_size, - base_ts, - &mut rng, - &pending, - &errors, - )?; - load_stock( - &client, - database_number, - config.warehouses_per_database, - config.batch_size, - &mut rng, - &pending, - &errors, - )?; - load_customers_history_orders( - &client, - database_number, - config.warehouses_per_database, - config.batch_size, - base_ts, - load_c_last, - &mut rng, - &pending, - &errors, - )?; - - wait_for_pending(&pending); - take_first_error(&errors)?; + let request = build_load_request(config, database_number, topology)?; + client + .configure_tpcc_load(request) + .context("failed to configure tpcc load")?; + client.start_tpcc_load().context("failed to start tpcc load")?; + wait_for_load_completion(&client, 
database_identity)?; client.shutdown(); - log::info!("tpcc load for database {database} finished"); + log::info!("tpcc load for database {database_identity} finished"); Ok(()) } -fn load_items( - client: &ModuleClient, - batch_size: usize, - rng: &mut StdRng, - pending: &Arc<(Mutex, Condvar)>, - errors: &Arc>>, -) -> Result<()> { - let mut batch = Vec::with_capacity(batch_size); - for item_id in 1..=ITEMS { - batch.push(Item { - i_id: item_id, - i_im_id: rng.random_range(1..=10_000), - i_name: alpha_numeric_string(rng, 14, 24), - i_price_cents: rng.random_range(100..=10_000), - i_data: maybe_with_original(rng, 26, 50), - }); - if batch.len() >= batch_size { - client.queue_load_items(std::mem::take(&mut batch), &pending, &errors)?; - } - } - if !batch.is_empty() { - client.queue_load_items(batch, &pending, &errors)?; - } - Ok(()) -} - -fn warehouses_range(database_number: u16, warehouses_per_database: u16) -> Range { - let start_warehouse_number = database_number * warehouses_per_database + 1; - let end_warehouse_number = start_warehouse_number + warehouses_per_database; - start_warehouse_number..end_warehouse_number -} - -fn load_remote_warehouses( - client: &ModuleClient, +fn build_load_request( + config: &LoadConfig, database_number: u16, - num_databases: u16, - warehouses_per_database: u16, - batch_size: usize, topology: &DatabaseTopology, - pending: &Arc<(Mutex, Condvar)>, - errors: &Arc>>, -) -> Result<()> { - let mut warehouse_batch = Vec::with_capacity(batch_size); - - for other_database_number in 0..num_databases { - if other_database_number == database_number { - continue; - } - let other_database_ident = topology.identity_for_database_number(other_database_number)?; - - for w_id in warehouses_range(other_database_number, warehouses_per_database) { - warehouse_batch.push(RemoteWarehouse { - w_id, - remote_database_home: other_database_ident, - }); - } - } - - while !warehouse_batch.is_empty() { - let split_at = warehouse_batch.len().min(batch_size); - let 
remainder = warehouse_batch.split_off(split_at); - let rows = std::mem::replace(&mut warehouse_batch, remainder); - client.queue_load_remote_warehouses(rows, &pending, &errors)?; - } - - Ok(()) -} - -fn load_warehouses_and_districts( - client: &ModuleClient, - database_number: u16, - warehouses_per_database: u16, - batch_size: usize, - timestamp: Timestamp, - rng: &mut StdRng, - pending: &Arc<(Mutex, Condvar)>, - errors: &Arc>>, -) -> Result<()> { - let mut warehouse_batch = Vec::with_capacity(batch_size); - let mut district_batch = Vec::with_capacity(batch_size); - - for w_id in warehouses_range(database_number, warehouses_per_database) { - warehouse_batch.push(Warehouse { - w_id, - w_name: alpha_string(rng, 6, 10), - w_street_1: alpha_numeric_string(rng, 10, 20), - w_street_2: alpha_numeric_string(rng, 10, 20), - w_city: alpha_string(rng, 10, 20), - w_state: alpha_string(rng, 2, 2), - w_zip: zip_code(rng), - w_tax_bps: rng.random_range(0..=2_000), - w_ytd_cents: WAREHOUSE_YTD_CENTS, - }); - - for d_id in 1..=DISTRICTS_PER_WAREHOUSE { - district_batch.push(District { - district_key: pack_district_key(w_id, d_id), - d_w_id: w_id, - d_id, - d_name: alpha_string(rng, 6, 10), - d_street_1: alpha_numeric_string(rng, 10, 20), - d_street_2: alpha_numeric_string(rng, 10, 20), - d_city: alpha_string(rng, 10, 20), - d_state: alpha_string(rng, 2, 2), - d_zip: zip_code(rng), - d_tax_bps: rng.random_range(0..=2_000), - d_ytd_cents: DISTRICT_YTD_CENTS, - d_next_o_id: CUSTOMERS_PER_DISTRICT + 1, - }); - } - } - - while !warehouse_batch.is_empty() { - let split_at = warehouse_batch.len().min(batch_size); - let remainder = warehouse_batch.split_off(split_at); - let rows = std::mem::replace(&mut warehouse_batch, remainder); - client.queue_load_warehouses(rows, &pending, &errors)?; - } - while !district_batch.is_empty() { - let split_at = district_batch.len().min(batch_size); - let remainder = district_batch.split_off(split_at); - let rows = std::mem::replace(&mut district_batch, 
remainder); - client.queue_load_districts(rows, &pending, &errors)?; +) -> Result { + let mut rng = StdRng::seed_from_u64(LOAD_SEED); + let load_c_last = rng.random_range(0..=255); + let mut database_identities = Vec::with_capacity(config.num_databases as usize); + for database_number in 0..config.num_databases { + database_identities.push(topology.identity_for_database_number(database_number)?); } - let _ = timestamp; - Ok(()) -} -fn load_stock( - client: &ModuleClient, - database_number: u16, - warehouses_per_database: u16, - batch_size: usize, - rng: &mut StdRng, - pending: &Arc<(Mutex, Condvar)>, - errors: &Arc>>, -) -> Result<()> { - let mut batch = Vec::with_capacity(batch_size); - for w_id in warehouses_range(database_number, warehouses_per_database) { - for item_id in 1..=ITEMS { - batch.push(Stock { - stock_key: pack_stock_key(w_id, item_id), - s_w_id: w_id, - s_i_id: item_id, - s_quantity: rng.random_range(10..=100), - s_dist_01: alpha_string(rng, 24, 24), - s_dist_02: alpha_string(rng, 24, 24), - s_dist_03: alpha_string(rng, 24, 24), - s_dist_04: alpha_string(rng, 24, 24), - s_dist_05: alpha_string(rng, 24, 24), - s_dist_06: alpha_string(rng, 24, 24), - s_dist_07: alpha_string(rng, 24, 24), - s_dist_08: alpha_string(rng, 24, 24), - s_dist_09: alpha_string(rng, 24, 24), - s_dist_10: alpha_string(rng, 24, 24), - s_ytd: 0, - s_order_cnt: 0, - s_remote_cnt: 0, - s_data: maybe_with_original(rng, 26, 50), - }); - if batch.len() >= batch_size { - client.queue_load_stocks(std::mem::take(&mut batch), &pending, &errors)?; - } - } - } - if !batch.is_empty() { - client.queue_load_stocks(batch, &pending, &errors)?; - } - Ok(()) + Ok(TpccLoadConfigRequest { + database_number, + num_databases: config.num_databases, + warehouses_per_database: config.warehouses_per_database, + batch_size: u32::try_from(config.batch_size).context("batch_size exceeds u32")?, + seed: LOAD_SEED, + load_c_last, + base_ts: Timestamp::from(SystemTime::now()), + spacetimedb_uri: 
config.connection.uri.clone(), + database_identities, + }) } -fn load_customers_history_orders( - client: &ModuleClient, - database_number: u16, - warehouses_per_database: u16, - batch_size: usize, - timestamp: Timestamp, - load_c_last: u32, - rng: &mut StdRng, - pending: &Arc<(Mutex, Condvar)>, - errors: &Arc>>, -) -> Result<()> { - let mut customer_batch = Vec::with_capacity(batch_size); - let mut history_batch = Vec::with_capacity(batch_size); - let mut order_batch = Vec::with_capacity(batch_size); - let mut new_order_batch = Vec::with_capacity(batch_size); - let mut order_line_batch = Vec::with_capacity(batch_size); - - for w_id in warehouses_range(database_number, warehouses_per_database) { - for d_id in 1..=DISTRICTS_PER_WAREHOUSE { - let mut permutation: Vec = (1..=CUSTOMERS_PER_DISTRICT).collect(); - permutation.shuffle(rng); - - for c_id in 1..=CUSTOMERS_PER_DISTRICT { - let credit = if rng.random_bool(0.10) { "BC" } else { "GC" }; - let last_name = if c_id <= 1_000 { - make_last_name(c_id - 1) - } else { - make_last_name(nurand(rng, 255, 0, 999, load_c_last)) - }; - customer_batch.push(Customer { - customer_key: pack_customer_key(w_id, d_id, c_id), - c_w_id: w_id, - c_d_id: d_id, - c_id, - c_first: alpha_string(rng, 8, 16), - c_middle: "OE".to_string(), - c_last: last_name, - c_street_1: alpha_numeric_string(rng, 10, 20), - c_street_2: alpha_numeric_string(rng, 10, 20), - c_city: alpha_string(rng, 10, 20), - c_state: alpha_string(rng, 2, 2), - c_zip: zip_code(rng), - c_phone: numeric_string(rng, 16, 16), - c_since: timestamp, - c_credit: credit.to_string(), - c_credit_lim_cents: CUSTOMER_CREDIT_LIMIT_CENTS, - c_discount_bps: rng.random_range(0..=5_000), - c_balance_cents: CUSTOMER_INITIAL_BALANCE_CENTS, - c_ytd_payment_cents: CUSTOMER_INITIAL_YTD_PAYMENT_CENTS, - c_payment_cnt: 1, - c_delivery_cnt: 0, - c_data: alpha_numeric_string(rng, 300, 500), - }); - history_batch.push(History { - history_id: 0, - h_c_id: c_id, - h_c_d_id: d_id, - h_c_w_id: w_id, - 
h_d_id: d_id, - h_w_id: w_id, - h_date: timestamp, - h_amount_cents: HISTORY_INITIAL_AMOUNT_CENTS, - h_data: alpha_numeric_string(rng, 12, 24), - }); - - if customer_batch.len() >= batch_size { - client.queue_load_customers(std::mem::take(&mut customer_batch), &pending, &errors)?; - } - if history_batch.len() >= batch_size { - client.queue_load_history(std::mem::take(&mut history_batch), &pending, &errors)?; - } +fn wait_for_load_completion(client: &ModuleClient, database_identity: spacetimedb_sdk::Identity) -> Result<()> { + let mut last_logged = None; + + loop { + client.ensure_connected()?; + + if let Some(state) = client.load_state() { + let current_progress = ( + state.status, + state.phase, + state.next_warehouse_id, + state.next_district_id, + state.next_item_id, + state.next_order_id, + state.chunks_completed, + state.rows_inserted, + ); + if last_logged != Some(current_progress) { + log::info!( + "tpcc load progress for {}: status={:?} phase={:?} chunks={} rows={} next=({},{},{},{})", + database_identity, + state.status, + state.phase, + state.chunks_completed, + state.rows_inserted, + state.next_warehouse_id, + state.next_district_id, + state.next_item_id, + state.next_order_id + ); + last_logged = Some(current_progress); } - for o_id in 1..=CUSTOMERS_PER_DISTRICT { - let customer_id = permutation[(o_id - 1) as usize]; - let delivered = o_id < NEW_ORDER_START; - let order_line_count = rng.random_range(5..=15) as u8; - order_batch.push(OOrder { - order_key: pack_order_key(w_id, d_id, o_id), - o_w_id: w_id, - o_d_id: d_id, - o_id, - o_c_id: customer_id, - o_entry_d: timestamp, - o_carrier_id: if delivered { - Some(rng.random_range(1..=10)) - } else { - None - }, - o_ol_cnt: order_line_count, - o_all_local: true, - }); - if !delivered { - new_order_batch.push(NewOrder { - new_order_key: pack_order_key(w_id, d_id, o_id), - no_w_id: w_id, - no_d_id: d_id, - no_o_id: o_id, - }); - } - - for ol_number in 1..=order_line_count { - order_line_batch.push(OrderLine { 
- order_line_key: pack_order_line_key(w_id, d_id, o_id, ol_number), - ol_w_id: w_id, - ol_d_id: d_id, - ol_o_id: o_id, - ol_number, - ol_i_id: rng.random_range(1..=ITEMS), - ol_supply_w_id: w_id, - ol_delivery_d: if delivered { Some(timestamp) } else { None }, - ol_quantity: 5, - ol_amount_cents: if delivered { 0 } else { rng.random_range(1..=999_999) }, - ol_dist_info: alpha_string(rng, 24, 24), - }); - if order_line_batch.len() >= batch_size { - client.queue_load_order_lines( - std::mem::take(&mut order_line_batch), - &pending, - &errors, - )?; - } - } - - if order_batch.len() >= batch_size { - client.queue_load_orders(std::mem::take(&mut order_batch), &pending, &errors)?; - } - if new_order_batch.len() >= batch_size { - client.queue_load_new_orders(std::mem::take(&mut new_order_batch), &pending, &errors)?; + match state.status { + TpccLoadStatus::Complete => return Ok(()), + TpccLoadStatus::Failed => { + anyhow::bail!( + "tpcc load failed for {}: {}", + database_identity, + state + .last_error + .unwrap_or_else(|| "load failed without an error message".to_string()) + ) } + TpccLoadStatus::Idle | TpccLoadStatus::Running => {} } } - } - - if !customer_batch.is_empty() { - client.queue_load_customers(customer_batch, &pending, &errors)?; - } - if !history_batch.is_empty() { - client.queue_load_history(history_batch, &pending, &errors)?; - } - if !order_batch.is_empty() { - client.queue_load_orders(order_batch, &pending, &errors)?; - } - if !new_order_batch.is_empty() { - client.queue_load_new_orders(new_order_batch, &pending, &errors)?; - } - if !order_line_batch.is_empty() { - client.queue_load_order_lines(order_line_batch, &pending, &errors)?; - } - - Ok(()) -} - -fn wait_for_pending(pending: &Arc<(Mutex, Condvar)>) { - let (lock, cvar) = pending.as_ref(); - let mut guard = lock.lock().unwrap(); - while *guard > 0 { - guard = cvar.wait(guard).unwrap(); - } -} -fn take_first_error(errors: &Arc>>) -> Result<()> { - let mut guard = errors.lock().unwrap(); - if let 
Some(err) = guard.take() { - Err(err) - } else { - Ok(()) + thread::sleep(Duration::from_millis(250)); } } diff --git a/tools/tpcc-runner/src/module_bindings/configure_tpcc_load_reducer.rs b/tools/tpcc-runner/src/module_bindings/configure_tpcc_load_reducer.rs new file mode 100644 index 00000000000..92271a5f7e3 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/configure_tpcc_load_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::tpcc_load_config_request_type::TpccLoadConfigRequest; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct ConfigureTpccLoadArgs { + pub request: TpccLoadConfigRequest, +} + +impl From for super::Reducer { + fn from(args: ConfigureTpccLoadArgs) -> Self { + Self::ConfigureTpccLoad { request: args.request } + } +} + +impl __sdk::InModule for ConfigureTpccLoadArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `configure_tpcc_load`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait configure_tpcc_load { + /// Request that the remote module invoke the reducer `configure_tpcc_load` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`configure_tpcc_load:configure_tpcc_load_then`] to run a callback after the reducer completes. 
+ fn configure_tpcc_load(&self, request: TpccLoadConfigRequest) -> __sdk::Result<()> { + self.configure_tpcc_load_then(request, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `configure_tpcc_load` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn configure_tpcc_load_then( + &self, + request: TpccLoadConfigRequest, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl configure_tpcc_load for super::RemoteReducers { + fn configure_tpcc_load_then( + &self, + request: TpccLoadConfigRequest, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(ConfigureTpccLoadArgs { request }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/mod.rs b/tools/tpcc-runner/src/module_bindings/mod.rs index a3d2e2bbdd7..0507a9cd4ae 100644 --- a/tools/tpcc-runner/src/module_bindings/mod.rs +++ b/tools/tpcc-runner/src/module_bindings/mod.rs @@ -6,6 +6,7 @@ #![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; +pub mod configure_tpcc_load_reducer; pub mod confirm_item_reservation_reducer; pub mod customer_selector_type; pub mod customer_type; @@ -50,15 +51,26 @@ pub mod reserve_item_input_type; pub mod reserve_item_output_type; pub mod reserved_item_log_type; pub mod reset_tpcc_reducer; +pub mod restart_tpcc_load_reducer; +pub mod resume_tpcc_load_reducer; pub mod rollback_item_reservation_reducer; pub mod set_spacetimedb_uri_reducer; pub mod spacetime_db_uri_type; +pub mod start_tpcc_load_reducer; pub mod 
stock_level_procedure; pub mod stock_level_result_type; pub mod stock_type; pub mod test_procedure; +pub mod tpcc_load_config_request_type; +pub mod tpcc_load_config_type; +pub mod tpcc_load_job_type; +pub mod tpcc_load_phase_type; +pub mod tpcc_load_state_table; +pub mod tpcc_load_state_type; +pub mod tpcc_load_status_type; pub mod warehouse_type; +pub use configure_tpcc_load_reducer::configure_tpcc_load; pub use confirm_item_reservation_reducer::confirm_item_reservation; pub use customer_selector_type::CustomerSelector; pub use customer_type::Customer; @@ -103,13 +115,23 @@ pub use reserve_item_input_type::ReserveItemInput; pub use reserve_item_output_type::ReserveItemOutput; pub use reserved_item_log_type::ReservedItemLog; pub use reset_tpcc_reducer::reset_tpcc; +pub use restart_tpcc_load_reducer::restart_tpcc_load; +pub use resume_tpcc_load_reducer::resume_tpcc_load; pub use rollback_item_reservation_reducer::rollback_item_reservation; pub use set_spacetimedb_uri_reducer::set_spacetimedb_uri; pub use spacetime_db_uri_type::SpacetimeDbUri; +pub use start_tpcc_load_reducer::start_tpcc_load; pub use stock_level_procedure::stock_level; pub use stock_level_result_type::StockLevelResult; pub use stock_type::Stock; pub use test_procedure::test; +pub use tpcc_load_config_request_type::TpccLoadConfigRequest; +pub use tpcc_load_config_type::TpccLoadConfig; +pub use tpcc_load_job_type::TpccLoadJob; +pub use tpcc_load_phase_type::TpccLoadPhase; +pub use tpcc_load_state_table::*; +pub use tpcc_load_state_type::TpccLoadState; +pub use tpcc_load_status_type::TpccLoadStatus; pub use warehouse_type::Warehouse; #[derive(Clone, PartialEq, Debug)] @@ -120,6 +142,7 @@ pub use warehouse_type::Warehouse; /// to indicate which reducer caused the event. 
pub enum Reducer { + ConfigureTpccLoad { request: TpccLoadConfigRequest }, ConfirmItemReservation { rollback_token: u64 }, LoadCustomers { rows: Vec }, LoadDistricts { rows: Vec }, @@ -132,8 +155,11 @@ pub enum Reducer { LoadStocks { rows: Vec }, LoadWarehouses { rows: Vec }, ResetTpcc, + RestartTpccLoad, + ResumeTpccLoad, RollbackItemReservation { rollback_token: u64 }, SetSpacetimedbUri { uri: String }, + StartTpccLoad, } impl __sdk::InModule for Reducer { @@ -143,6 +169,7 @@ impl __sdk::InModule for Reducer { impl __sdk::Reducer for Reducer { fn reducer_name(&self) -> &'static str { match self { + Reducer::ConfigureTpccLoad { .. } => "configure_tpcc_load", Reducer::ConfirmItemReservation { .. } => "confirm_item_reservation", Reducer::LoadCustomers { .. } => "load_customers", Reducer::LoadDistricts { .. } => "load_districts", @@ -155,14 +182,22 @@ impl __sdk::Reducer for Reducer { Reducer::LoadStocks { .. } => "load_stocks", Reducer::LoadWarehouses { .. } => "load_warehouses", Reducer::ResetTpcc => "reset_tpcc", + Reducer::RestartTpccLoad => "restart_tpcc_load", + Reducer::ResumeTpccLoad => "resume_tpcc_load", Reducer::RollbackItemReservation { .. } => "rollback_item_reservation", Reducer::SetSpacetimedbUri { .. 
} => "set_spacetimedb_uri", + Reducer::StartTpccLoad => "start_tpcc_load", _ => unreachable!(), } } #[allow(clippy::clone_on_copy)] fn args_bsatn(&self) -> Result, __sats::bsatn::EncodeError> { match self { + Reducer::ConfigureTpccLoad { request } => { + __sats::bsatn::to_vec(&configure_tpcc_load_reducer::ConfigureTpccLoadArgs { + request: request.clone(), + }) + } Reducer::ConfirmItemReservation { rollback_token } => { __sats::bsatn::to_vec(&confirm_item_reservation_reducer::ConfirmItemReservationArgs { rollback_token: rollback_token.clone(), @@ -199,6 +234,8 @@ impl __sdk::Reducer for Reducer { __sats::bsatn::to_vec(&load_warehouses_reducer::LoadWarehousesArgs { rows: rows.clone() }) } Reducer::ResetTpcc => __sats::bsatn::to_vec(&reset_tpcc_reducer::ResetTpccArgs {}), + Reducer::RestartTpccLoad => __sats::bsatn::to_vec(&restart_tpcc_load_reducer::RestartTpccLoadArgs {}), + Reducer::ResumeTpccLoad => __sats::bsatn::to_vec(&resume_tpcc_load_reducer::ResumeTpccLoadArgs {}), Reducer::RollbackItemReservation { rollback_token } => { __sats::bsatn::to_vec(&rollback_item_reservation_reducer::RollbackItemReservationArgs { rollback_token: rollback_token.clone(), @@ -207,6 +244,7 @@ impl __sdk::Reducer for Reducer { Reducer::SetSpacetimedbUri { uri } => { __sats::bsatn::to_vec(&set_spacetimedb_uri_reducer::SetSpacetimedbUriArgs { uri: uri.clone() }) } + Reducer::StartTpccLoad => __sats::bsatn::to_vec(&start_tpcc_load_reducer::StartTpccLoadArgs {}), _ => unreachable!(), } } @@ -215,7 +253,9 @@ impl __sdk::Reducer for Reducer { #[derive(Default, Debug)] #[allow(non_snake_case)] #[doc(hidden)] -pub struct DbUpdate {} +pub struct DbUpdate { + tpcc_load_state: __sdk::TableUpdate, +} impl TryFrom<__ws::v2::TransactionUpdate> for DbUpdate { type Error = __sdk::Error; @@ -223,6 +263,10 @@ impl TryFrom<__ws::v2::TransactionUpdate> for DbUpdate { let mut db_update = DbUpdate::default(); for table_update in __sdk::transaction_update_iter_table_updates(raw) { match 
&table_update.table_name[..] { + "tpcc_load_state" => db_update + .tpcc_load_state + .append(tpcc_load_state_table::parse_table_update(table_update)?), + unknown => { return Err(__sdk::InternalError::unknown_name("table", unknown, "DatabaseUpdate").into()); } @@ -240,12 +284,19 @@ impl __sdk::DbUpdate for DbUpdate { fn apply_to_client_cache(&self, cache: &mut __sdk::ClientCache) -> AppliedDiff<'_> { let mut diff = AppliedDiff::default(); + diff.tpcc_load_state = cache + .apply_diff_to_table::("tpcc_load_state", &self.tpcc_load_state) + .with_updates_by_pk(|row| &row.singleton_id); + diff } fn parse_initial_rows(raw: __ws::v2::QueryRows) -> __sdk::Result { let mut db_update = DbUpdate::default(); for table_rows in raw.tables { match &table_rows.table[..] { + "tpcc_load_state" => db_update + .tpcc_load_state + .append(__sdk::parse_row_list_as_inserts(table_rows.rows)?), unknown => { return Err(__sdk::InternalError::unknown_name("table", unknown, "QueryRows").into()); } @@ -257,6 +308,9 @@ impl __sdk::DbUpdate for DbUpdate { let mut db_update = DbUpdate::default(); for table_rows in raw.tables { match &table_rows.table[..] 
{ + "tpcc_load_state" => db_update + .tpcc_load_state + .append(__sdk::parse_row_list_as_deletes(table_rows.rows)?), unknown => { return Err(__sdk::InternalError::unknown_name("table", unknown, "QueryRows").into()); } @@ -270,6 +324,7 @@ impl __sdk::DbUpdate for DbUpdate { #[allow(non_snake_case)] #[doc(hidden)] pub struct AppliedDiff<'r> { + tpcc_load_state: __sdk::TableAppliedDiff<'r, TpccLoadState>, __unused: std::marker::PhantomData<&'r ()>, } @@ -278,7 +333,9 @@ impl __sdk::InModule for AppliedDiff<'_> { } impl<'r> __sdk::AppliedDiff<'r> for AppliedDiff<'r> { - fn invoke_row_callbacks(&self, event: &EventContext, callbacks: &mut __sdk::DbCallbacks) {} + fn invoke_row_callbacks(&self, event: &EventContext, callbacks: &mut __sdk::DbCallbacks) { + callbacks.invoke_table_row_callbacks::("tpcc_load_state", &self.tpcc_load_state, event); + } } #[doc(hidden)] @@ -934,6 +991,8 @@ impl __sdk::SpacetimeModule for RemoteModule { type SubscriptionHandle = SubscriptionHandle; type QueryBuilder = __sdk::QueryBuilder; - fn register_tables(client_cache: &mut __sdk::ClientCache) {} - const ALL_TABLE_NAMES: &'static [&'static str] = &[]; + fn register_tables(client_cache: &mut __sdk::ClientCache) { + tpcc_load_state_table::register_table(client_cache); + } + const ALL_TABLE_NAMES: &'static [&'static str] = &["tpcc_load_state"]; } diff --git a/tools/tpcc-runner/src/module_bindings/restart_tpcc_load_reducer.rs b/tools/tpcc-runner/src/module_bindings/restart_tpcc_load_reducer.rs new file mode 100644 index 00000000000..85a25ad7cb0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/restart_tpcc_load_reducer.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct RestartTpccLoadArgs {} + +impl From for super::Reducer { + fn from(args: RestartTpccLoadArgs) -> Self { + Self::RestartTpccLoad + } +} + +impl __sdk::InModule for RestartTpccLoadArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `restart_tpcc_load`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait restart_tpcc_load { + /// Request that the remote module invoke the reducer `restart_tpcc_load` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`restart_tpcc_load:restart_tpcc_load_then`] to run a callback after the reducer completes. + fn restart_tpcc_load(&self) -> __sdk::Result<()> { + self.restart_tpcc_load_then(|_, _| {}) + } + + /// Request that the remote module invoke the reducer `restart_tpcc_load` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn restart_tpcc_load_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl restart_tpcc_load for super::RemoteReducers { + fn restart_tpcc_load_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(RestartTpccLoadArgs {}, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/resume_tpcc_load_reducer.rs b/tools/tpcc-runner/src/module_bindings/resume_tpcc_load_reducer.rs new file mode 100644 index 00000000000..4e83a5f3bd9 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/resume_tpcc_load_reducer.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct ResumeTpccLoadArgs {} + +impl From for super::Reducer { + fn from(args: ResumeTpccLoadArgs) -> Self { + Self::ResumeTpccLoad + } +} + +impl __sdk::InModule for ResumeTpccLoadArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `resume_tpcc_load`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait resume_tpcc_load { + /// Request that the remote module invoke the reducer `resume_tpcc_load` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. 
+ /// /// Use [`resume_tpcc_load:resume_tpcc_load_then`] to run a callback after the reducer completes. + fn resume_tpcc_load(&self) -> __sdk::Result<()> { + self.resume_tpcc_load_then(|_, _| {}) + } + + /// Request that the remote module invoke the reducer `resume_tpcc_load` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn resume_tpcc_load_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl resume_tpcc_load for super::RemoteReducers { + fn resume_tpcc_load_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(ResumeTpccLoadArgs {}, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/start_tpcc_load_reducer.rs b/tools/tpcc-runner/src/module_bindings/start_tpcc_load_reducer.rs new file mode 100644 index 00000000000..c815141d59a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/start_tpcc_load_reducer.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct StartTpccLoadArgs {} + +impl From for super::Reducer { + fn from(args: StartTpccLoadArgs) -> Self { + Self::StartTpccLoad + } +} + +impl __sdk::InModule for StartTpccLoadArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `start_tpcc_load`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait start_tpcc_load { + /// Request that the remote module invoke the reducer `start_tpcc_load` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`start_tpcc_load:start_tpcc_load_then`] to run a callback after the reducer completes. + fn start_tpcc_load(&self) -> __sdk::Result<()> { + self.start_tpcc_load_then(|_, _| {}) + } + + /// Request that the remote module invoke the reducer `start_tpcc_load` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn start_tpcc_load_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl start_tpcc_load for super::RemoteReducers { + fn start_tpcc_load_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(StartTpccLoadArgs {}, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/tpcc_load_config_request_type.rs b/tools/tpcc-runner/src/module_bindings/tpcc_load_config_request_type.rs new file mode 100644 index 00000000000..a7bffce8f23 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/tpcc_load_config_request_type.rs @@ -0,0 +1,23 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct TpccLoadConfigRequest { + pub database_number: u16, + pub num_databases: u16, + pub warehouses_per_database: u16, + pub batch_size: u32, + pub seed: u64, + pub load_c_last: u32, + pub base_ts: __sdk::Timestamp, + pub spacetimedb_uri: String, + pub database_identities: Vec<__sdk::Identity>, +} + +impl __sdk::InModule for TpccLoadConfigRequest { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/tpcc_load_config_type.rs b/tools/tpcc-runner/src/module_bindings/tpcc_load_config_type.rs new file mode 100644 index 00000000000..7fd6e015397 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/tpcc_load_config_type.rs @@ -0,0 +1,76 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct TpccLoadConfig { + pub singleton_id: u8, + pub database_number: u16, + pub num_databases: u16, + pub warehouses_per_database: u16, + pub batch_size: u32, + pub seed: u64, + pub load_c_last: u32, + pub base_ts: __sdk::Timestamp, + pub spacetimedb_uri: String, + pub database_identities: Vec<__sdk::Identity>, +} + +impl __sdk::InModule for TpccLoadConfig { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `TpccLoadConfig`. +/// +/// Provides typed access to columns for query building. +pub struct TpccLoadConfigCols { + pub singleton_id: __sdk::__query_builder::Col, + pub database_number: __sdk::__query_builder::Col, + pub num_databases: __sdk::__query_builder::Col, + pub warehouses_per_database: __sdk::__query_builder::Col, + pub batch_size: __sdk::__query_builder::Col, + pub seed: __sdk::__query_builder::Col, + pub load_c_last: __sdk::__query_builder::Col, + pub base_ts: __sdk::__query_builder::Col, + pub spacetimedb_uri: __sdk::__query_builder::Col, + pub database_identities: __sdk::__query_builder::Col>, +} + +impl __sdk::__query_builder::HasCols for TpccLoadConfig { + type Cols = TpccLoadConfigCols; + fn cols(table_name: &'static str) -> Self::Cols { + TpccLoadConfigCols { + singleton_id: __sdk::__query_builder::Col::new(table_name, "singleton_id"), + database_number: __sdk::__query_builder::Col::new(table_name, "database_number"), + num_databases: __sdk::__query_builder::Col::new(table_name, "num_databases"), + warehouses_per_database: __sdk::__query_builder::Col::new(table_name, "warehouses_per_database"), + batch_size: __sdk::__query_builder::Col::new(table_name, "batch_size"), + seed: __sdk::__query_builder::Col::new(table_name, "seed"), + load_c_last: __sdk::__query_builder::Col::new(table_name, 
"load_c_last"), + base_ts: __sdk::__query_builder::Col::new(table_name, "base_ts"), + spacetimedb_uri: __sdk::__query_builder::Col::new(table_name, "spacetimedb_uri"), + database_identities: __sdk::__query_builder::Col::new(table_name, "database_identities"), + } + } +} + +/// Indexed column accessor struct for the table `TpccLoadConfig`. +/// +/// Provides typed access to indexed columns for query building. +pub struct TpccLoadConfigIxCols { + pub singleton_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for TpccLoadConfig { + type IxCols = TpccLoadConfigIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + TpccLoadConfigIxCols { + singleton_id: __sdk::__query_builder::IxCol::new(table_name, "singleton_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for TpccLoadConfig {} diff --git a/tools/tpcc-runner/src/module_bindings/tpcc_load_job_type.rs b/tools/tpcc-runner/src/module_bindings/tpcc_load_job_type.rs new file mode 100644 index 00000000000..484fd587c1b --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/tpcc_load_job_type.rs @@ -0,0 +1,69 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::tpcc_load_phase_type::TpccLoadPhase; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct TpccLoadJob { + pub scheduled_id: u64, + pub scheduled_at: __sdk::ScheduleAt, + pub phase: TpccLoadPhase, + pub next_warehouse_id: u16, + pub next_district_id: u8, + pub next_item_id: u32, + pub next_order_id: u32, +} + +impl __sdk::InModule for TpccLoadJob { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `TpccLoadJob`. +/// +/// Provides typed access to columns for query building. 
+pub struct TpccLoadJobCols { + pub scheduled_id: __sdk::__query_builder::Col, + pub scheduled_at: __sdk::__query_builder::Col, + pub phase: __sdk::__query_builder::Col, + pub next_warehouse_id: __sdk::__query_builder::Col, + pub next_district_id: __sdk::__query_builder::Col, + pub next_item_id: __sdk::__query_builder::Col, + pub next_order_id: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for TpccLoadJob { + type Cols = TpccLoadJobCols; + fn cols(table_name: &'static str) -> Self::Cols { + TpccLoadJobCols { + scheduled_id: __sdk::__query_builder::Col::new(table_name, "scheduled_id"), + scheduled_at: __sdk::__query_builder::Col::new(table_name, "scheduled_at"), + phase: __sdk::__query_builder::Col::new(table_name, "phase"), + next_warehouse_id: __sdk::__query_builder::Col::new(table_name, "next_warehouse_id"), + next_district_id: __sdk::__query_builder::Col::new(table_name, "next_district_id"), + next_item_id: __sdk::__query_builder::Col::new(table_name, "next_item_id"), + next_order_id: __sdk::__query_builder::Col::new(table_name, "next_order_id"), + } + } +} + +/// Indexed column accessor struct for the table `TpccLoadJob`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct TpccLoadJobIxCols { + pub scheduled_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for TpccLoadJob { + type IxCols = TpccLoadJobIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + TpccLoadJobIxCols { + scheduled_id: __sdk::__query_builder::IxCol::new(table_name, "scheduled_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for TpccLoadJob {} diff --git a/tools/tpcc-runner/src/module_bindings/tpcc_load_phase_type.rs b/tools/tpcc-runner/src/module_bindings/tpcc_load_phase_type.rs new file mode 100644 index 00000000000..391e7f90baa --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/tpcc_load_phase_type.rs @@ -0,0 +1,24 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +#[derive(Copy, Eq, Hash)] +pub enum TpccLoadPhase { + Items, + + WarehousesDistricts, + + Stock, + + CustomersHistory, + + Orders, +} + +impl __sdk::InModule for TpccLoadPhase { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/tpcc_load_state_table.rs b/tools/tpcc-runner/src/module_bindings/tpcc_load_state_table.rs new file mode 100644 index 00000000000..ea3d07e385e --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/tpcc_load_state_table.rs @@ -0,0 +1,161 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use super::tpcc_load_phase_type::TpccLoadPhase; +use super::tpcc_load_state_type::TpccLoadState; +use super::tpcc_load_status_type::TpccLoadStatus; +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +/// Table handle for the table `tpcc_load_state`. +/// +/// Obtain a handle from the [`TpccLoadStateTableAccess::tpcc_load_state`] method on [`super::RemoteTables`], +/// like `ctx.db.tpcc_load_state()`. +/// +/// Users are encouraged not to explicitly reference this type, +/// but to directly chain method calls, +/// like `ctx.db.tpcc_load_state().on_insert(...)`. +pub struct TpccLoadStateTableHandle<'ctx> { + imp: __sdk::TableHandle, + ctx: std::marker::PhantomData<&'ctx super::RemoteTables>, +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the table `tpcc_load_state`. +/// +/// Implemented for [`super::RemoteTables`]. +pub trait TpccLoadStateTableAccess { + #[allow(non_snake_case)] + /// Obtain a [`TpccLoadStateTableHandle`], which mediates access to the table `tpcc_load_state`. 
+ fn tpcc_load_state(&self) -> TpccLoadStateTableHandle<'_>; +} + +impl TpccLoadStateTableAccess for super::RemoteTables { + fn tpcc_load_state(&self) -> TpccLoadStateTableHandle<'_> { + TpccLoadStateTableHandle { + imp: self.imp.get_table::("tpcc_load_state"), + ctx: std::marker::PhantomData, + } + } +} + +pub struct TpccLoadStateInsertCallbackId(__sdk::CallbackId); +pub struct TpccLoadStateDeleteCallbackId(__sdk::CallbackId); + +impl<'ctx> __sdk::Table for TpccLoadStateTableHandle<'ctx> { + type Row = TpccLoadState; + type EventContext = super::EventContext; + + fn count(&self) -> u64 { + self.imp.count() + } + fn iter(&self) -> impl Iterator + '_ { + self.imp.iter() + } + + type InsertCallbackId = TpccLoadStateInsertCallbackId; + + fn on_insert( + &self, + callback: impl FnMut(&Self::EventContext, &Self::Row) + Send + 'static, + ) -> TpccLoadStateInsertCallbackId { + TpccLoadStateInsertCallbackId(self.imp.on_insert(Box::new(callback))) + } + + fn remove_on_insert(&self, callback: TpccLoadStateInsertCallbackId) { + self.imp.remove_on_insert(callback.0) + } + + type DeleteCallbackId = TpccLoadStateDeleteCallbackId; + + fn on_delete( + &self, + callback: impl FnMut(&Self::EventContext, &Self::Row) + Send + 'static, + ) -> TpccLoadStateDeleteCallbackId { + TpccLoadStateDeleteCallbackId(self.imp.on_delete(Box::new(callback))) + } + + fn remove_on_delete(&self, callback: TpccLoadStateDeleteCallbackId) { + self.imp.remove_on_delete(callback.0) + } +} + +pub struct TpccLoadStateUpdateCallbackId(__sdk::CallbackId); + +impl<'ctx> __sdk::TableWithPrimaryKey for TpccLoadStateTableHandle<'ctx> { + type UpdateCallbackId = TpccLoadStateUpdateCallbackId; + + fn on_update( + &self, + callback: impl FnMut(&Self::EventContext, &Self::Row, &Self::Row) + Send + 'static, + ) -> TpccLoadStateUpdateCallbackId { + TpccLoadStateUpdateCallbackId(self.imp.on_update(Box::new(callback))) + } + + fn remove_on_update(&self, callback: TpccLoadStateUpdateCallbackId) { + 
self.imp.remove_on_update(callback.0) + } +} + +/// Access to the `singleton_id` unique index on the table `tpcc_load_state`, +/// which allows point queries on the field of the same name +/// via the [`TpccLoadStateSingletonIdUnique::find`] method. +/// +/// Users are encouraged not to explicitly reference this type, +/// but to directly chain method calls, +/// like `ctx.db.tpcc_load_state().singleton_id().find(...)`. +pub struct TpccLoadStateSingletonIdUnique<'ctx> { + imp: __sdk::UniqueConstraintHandle, + phantom: std::marker::PhantomData<&'ctx super::RemoteTables>, +} + +impl<'ctx> TpccLoadStateTableHandle<'ctx> { + /// Get a handle on the `singleton_id` unique index on the table `tpcc_load_state`. + pub fn singleton_id(&self) -> TpccLoadStateSingletonIdUnique<'ctx> { + TpccLoadStateSingletonIdUnique { + imp: self.imp.get_unique_constraint::("singleton_id"), + phantom: std::marker::PhantomData, + } + } +} + +impl<'ctx> TpccLoadStateSingletonIdUnique<'ctx> { + /// Find the subscribed row whose `singleton_id` column value is equal to `col_val`, + /// if such a row is present in the client cache. + pub fn find(&self, col_val: &u8) -> Option { + self.imp.find(col_val) + } +} + +#[doc(hidden)] +pub(super) fn register_table(client_cache: &mut __sdk::ClientCache) { + let _table = client_cache.get_or_make_table::("tpcc_load_state"); + _table.add_unique_constraint::("singleton_id", |row| &row.singleton_id); +} + +#[doc(hidden)] +pub(super) fn parse_table_update( + raw_updates: __ws::v2::TableUpdate, +) -> __sdk::Result<__sdk::TableUpdate> { + __sdk::TableUpdate::parse_table_update(raw_updates).map_err(|e| { + __sdk::InternalError::failed_parse("TableUpdate", "TableUpdate") + .with_cause(e) + .into() + }) +} + +#[allow(non_camel_case_types)] +/// Extension trait for query builder access to the table `TpccLoadState`. +/// +/// Implemented for [`__sdk::QueryTableAccessor`]. 
+pub trait tpcc_load_stateQueryTableAccess { + #[allow(non_snake_case)] + /// Get a query builder for the table `TpccLoadState`. + fn tpcc_load_state(&self) -> __sdk::__query_builder::Table; +} + +impl tpcc_load_stateQueryTableAccess for __sdk::QueryTableAccessor { + fn tpcc_load_state(&self) -> __sdk::__query_builder::Table { + __sdk::__query_builder::Table::new("tpcc_load_state") + } +} diff --git a/tools/tpcc-runner/src/module_bindings/tpcc_load_state_type.rs b/tools/tpcc-runner/src/module_bindings/tpcc_load_state_type.rs new file mode 100644 index 00000000000..b587aea9a07 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/tpcc_load_state_type.rs @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::tpcc_load_phase_type::TpccLoadPhase; +use super::tpcc_load_status_type::TpccLoadStatus; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct TpccLoadState { + pub singleton_id: u8, + pub status: TpccLoadStatus, + pub phase: TpccLoadPhase, + pub next_warehouse_id: u16, + pub next_district_id: u8, + pub next_item_id: u32, + pub next_order_id: u32, + pub chunks_completed: u64, + pub rows_inserted: u64, + pub last_error: Option, + pub started_at: Option<__sdk::Timestamp>, + pub updated_at: __sdk::Timestamp, + pub completed_at: Option<__sdk::Timestamp>, +} + +impl __sdk::InModule for TpccLoadState { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `TpccLoadState`. +/// +/// Provides typed access to columns for query building. 
+pub struct TpccLoadStateCols { + pub singleton_id: __sdk::__query_builder::Col, + pub status: __sdk::__query_builder::Col, + pub phase: __sdk::__query_builder::Col, + pub next_warehouse_id: __sdk::__query_builder::Col, + pub next_district_id: __sdk::__query_builder::Col, + pub next_item_id: __sdk::__query_builder::Col, + pub next_order_id: __sdk::__query_builder::Col, + pub chunks_completed: __sdk::__query_builder::Col, + pub rows_inserted: __sdk::__query_builder::Col, + pub last_error: __sdk::__query_builder::Col>, + pub started_at: __sdk::__query_builder::Col>, + pub updated_at: __sdk::__query_builder::Col, + pub completed_at: __sdk::__query_builder::Col>, +} + +impl __sdk::__query_builder::HasCols for TpccLoadState { + type Cols = TpccLoadStateCols; + fn cols(table_name: &'static str) -> Self::Cols { + TpccLoadStateCols { + singleton_id: __sdk::__query_builder::Col::new(table_name, "singleton_id"), + status: __sdk::__query_builder::Col::new(table_name, "status"), + phase: __sdk::__query_builder::Col::new(table_name, "phase"), + next_warehouse_id: __sdk::__query_builder::Col::new(table_name, "next_warehouse_id"), + next_district_id: __sdk::__query_builder::Col::new(table_name, "next_district_id"), + next_item_id: __sdk::__query_builder::Col::new(table_name, "next_item_id"), + next_order_id: __sdk::__query_builder::Col::new(table_name, "next_order_id"), + chunks_completed: __sdk::__query_builder::Col::new(table_name, "chunks_completed"), + rows_inserted: __sdk::__query_builder::Col::new(table_name, "rows_inserted"), + last_error: __sdk::__query_builder::Col::new(table_name, "last_error"), + started_at: __sdk::__query_builder::Col::new(table_name, "started_at"), + updated_at: __sdk::__query_builder::Col::new(table_name, "updated_at"), + completed_at: __sdk::__query_builder::Col::new(table_name, "completed_at"), + } + } +} + +/// Indexed column accessor struct for the table `TpccLoadState`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct TpccLoadStateIxCols { + pub singleton_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for TpccLoadState { + type IxCols = TpccLoadStateIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + TpccLoadStateIxCols { + singleton_id: __sdk::__query_builder::IxCol::new(table_name, "singleton_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for TpccLoadState {} diff --git a/tools/tpcc-runner/src/module_bindings/tpcc_load_status_type.rs b/tools/tpcc-runner/src/module_bindings/tpcc_load_status_type.rs new file mode 100644 index 00000000000..09e3c96b30d --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/tpcc_load_status_type.rs @@ -0,0 +1,22 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +#[derive(Copy, Eq, Hash)] +pub enum TpccLoadStatus { + Idle, + + Running, + + Failed, + + Complete, +} + +impl __sdk::InModule for TpccLoadStatus { + type Module = super::RemoteModule; +} From 398cb8a79a8bf39efed64b80d31dfbbffaa96b40 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Sat, 28 Mar 2026 15:49:48 -0700 Subject: [PATCH 34/38] Add timers around server-side loaders --- modules/tpcc/src/load.rs | 11 ++++++++++- tools/tpcc-runner/README.md | 4 ++-- tools/tpcc-runner/src/config.rs | 22 +++++++++++++++++++++- 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/modules/tpcc/src/load.rs b/modules/tpcc/src/load.rs index 51933bbca8d..37d85e05c27 100644 --- a/modules/tpcc/src/load.rs +++ b/modules/tpcc/src/load.rs @@ -1,5 +1,8 @@ use spacetimedb::rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use spacetimedb::{reducer, table, Identity, ReducerContext, ScheduleAt, 
SpacetimeType, Table, Timestamp}; +use spacetimedb::{ + log_stopwatch::LogStopwatch, reducer, table, Identity, ReducerContext, ScheduleAt, SpacetimeType, Table, + Timestamp, +}; use crate::{ customer, district, history, item, @@ -217,6 +220,7 @@ pub(crate) fn clear_load_metadata(ctx: &ReducerContext) { } fn configure_tpcc_load_internal(ctx: &ReducerContext, request: TpccLoadConfigRequest) -> Result<(), String> { + let _timer = LogStopwatch::new("configure_tpcc_load"); validate_request(&request)?; clear_load_metadata(ctx); @@ -374,6 +378,7 @@ fn run_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) - } fn load_item_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { + let _timer = LogStopwatch::new("load_items"); if job.next_item_id == 0 || job.next_item_id > ITEMS { return Err(format!("invalid item cursor {}", job.next_item_id)); } @@ -403,6 +408,7 @@ fn load_warehouse_district_chunk( config: &TpccLoadConfig, job: &TpccLoadJob, ) -> Result { + let _timer = LogStopwatch::new("load_warehouses_districts"); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < warehouse_start(config.database_number, config.warehouses_per_database) || job.next_warehouse_id > end_warehouse @@ -439,6 +445,7 @@ fn load_warehouse_district_chunk( } fn load_stock_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { + let _timer = LogStopwatch::new("load_stocks"); let start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { @@ -479,6 +486,7 @@ fn load_customer_history_chunk( config: &TpccLoadConfig, job: &TpccLoadJob, ) -> Result { + let _timer = LogStopwatch::new("load_customers_history"); let start_warehouse = 
warehouse_start(config.database_number, config.warehouses_per_database); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { @@ -528,6 +536,7 @@ fn load_customer_history_chunk( } fn load_order_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { + let _timer = LogStopwatch::new("load_orders"); let start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md index ab00e8a8ad4..f504aba82c4 100644 --- a/tools/tpcc-runner/README.md +++ b/tools/tpcc-runner/README.md @@ -130,7 +130,7 @@ cargo run --release -p tpcc-runner -- load \ `--batch-size` still matters for `load`, but it now controls the server-side chunk size for phases like items, stock, and orders instead of the number of -rows pushed over the websocket by the client. +rows pushed over the websocket by the client. The default is `10000`. 
If you need the old behavior for comparison or debugging, `load-client` keeps the previous client-side row-push path and uses the same `--num-databases`, @@ -236,7 +236,7 @@ timeout_secs = 30 num_databases = 1 warehouses_per_database = 1 load_parallelism = 1 -batch_size = 500 +batch_size = 10000 reset = true [driver] diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs index 5cf5f61b83a..3418c3baf3f 100644 --- a/tools/tpcc-runner/src/config.rs +++ b/tools/tpcc-runner/src/config.rs @@ -7,6 +7,8 @@ use std::path::{Path, PathBuf}; use crate::protocol::DriverAssignment; +const DEFAULT_LOAD_BATCH_SIZE: usize = 10_000; + #[derive(Debug, Parser)] #[command(name = "tpcc-runner")] pub struct Cli { @@ -265,7 +267,10 @@ impl LoadArgs { if load_parallelism == 0 { bail!("load_parallelism must be positive"); } - let batch_size = self.batch_size.or(file.load.batch_size).unwrap_or(500); + let batch_size = self + .batch_size + .or(file.load.batch_size) + .unwrap_or(DEFAULT_LOAD_BATCH_SIZE); if batch_size == 0 { bail!("batch_size must be positive"); } @@ -445,4 +450,19 @@ mod tests { let err = args.resolve(&FileConfig::default()).unwrap_err().to_string(); assert!(err.contains("batch_size must be positive"), "{err}"); } + + #[test] + fn load_args_default_batch_size_is_10000() { + let args = LoadArgs { + connection: ConnectionArgs::default(), + num_databases: Some(1), + warehouses_per_database: Some(1), + load_parallelism: Some(1), + batch_size: None, + reset: Some(true), + }; + + let config = args.resolve(&FileConfig::default()).unwrap(); + assert_eq!(config.batch_size, DEFAULT_LOAD_BATCH_SIZE); + } } From ed4c6e8a4ecaeeef7bbe42b260743fdbe3a49728 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Sat, 28 Mar 2026 20:13:52 -0700 Subject: [PATCH 35/38] Add instructions for resuming a failed load --- tools/tpcc-runner/README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md 
index f504aba82c4..6e3f1912e78 100644 --- a/tools/tpcc-runner/README.md +++ b/tools/tpcc-runner/README.md @@ -132,6 +132,27 @@ cargo run --release -p tpcc-runner -- load \ chunk size for phases like items, stock, and orders instead of the number of rows pushed over the websocket by the client. The default is `10000`. +If a server-side load fails, the current `load` command does not resume it +automatically. Resume each affected database manually with: + +```bash +cargo run -p spacetimedb-cli -- call -s http://127.0.0.1:3000 tpcc-0 resume_tpcc_load +``` + +Repeat that for each database, for example: + +```bash +cargo run -p spacetimedb-cli -- call -s http://127.0.0.1:3000 tpcc-0 resume_tpcc_load +cargo run -p spacetimedb-cli -- call -s http://127.0.0.1:3000 tpcc-1 resume_tpcc_load +``` + +To discard partial progress for a database and start that shard over from the +saved load configuration, call: + +```bash +cargo run -p spacetimedb-cli -- call -s http://127.0.0.1:3000 tpcc-0 restart_tpcc_load +``` + If you need the old behavior for comparison or debugging, `load-client` keeps the previous client-side row-push path and uses the same `--num-databases`, `--warehouses-per-database`, `--load-parallelism`, `--batch-size`, and From 3050d7d896330aaedf2f6d7e2890c2d29d16c321 Mon Sep 17 00:00:00 2001 From: joshua-spacetime Date: Sat, 28 Mar 2026 20:40:13 -0700 Subject: [PATCH 36/38] Revert default batch size for server side loader --- tools/tpcc-runner/README.md | 4 ++-- tools/tpcc-runner/src/config.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md index 6e3f1912e78..c896fbe9c87 100644 --- a/tools/tpcc-runner/README.md +++ b/tools/tpcc-runner/README.md @@ -130,7 +130,7 @@ cargo run --release -p tpcc-runner -- load \ `--batch-size` still matters for `load`, but it now controls the server-side chunk size for phases like items, stock, and orders instead of the number of -rows pushed over the 
websocket by the client. The default is `10000`. +rows pushed over the websocket by the client. The default is `500`. If a server-side load fails, the current `load` command does not resume it automatically. Resume each affected database manually with: @@ -257,7 +257,7 @@ timeout_secs = 30 num_databases = 1 warehouses_per_database = 1 load_parallelism = 1 -batch_size = 10000 +batch_size = 500 reset = true [driver] diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs index 3418c3baf3f..5f01c59d57a 100644 --- a/tools/tpcc-runner/src/config.rs +++ b/tools/tpcc-runner/src/config.rs @@ -7,7 +7,7 @@ use std::path::{Path, PathBuf}; use crate::protocol::DriverAssignment; -const DEFAULT_LOAD_BATCH_SIZE: usize = 10_000; +const DEFAULT_LOAD_BATCH_SIZE: usize = 500; #[derive(Debug, Parser)] #[command(name = "tpcc-runner")] @@ -452,7 +452,7 @@ mod tests { } #[test] - fn load_args_default_batch_size_is_10000() { + fn load_args_default_batch_size_is_500() { let args = LoadArgs { connection: ConnectionArgs::default(), num_databases: Some(1), From e44225fa5e2079ad391a253983f1f56bc387d02c Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sun, 29 Mar 2026 12:23:14 -0400 Subject: [PATCH 37/38] Rework for reducers & remote reducer calls This ditches our use of procedures for their return values and of the procedure HTTP interface. 
--- crates/bindings/src/remote_reducer.rs | 12 +- modules/tpcc/src/lib.rs | 340 +++++------ modules/tpcc/src/new_order.rs | 527 ++++-------------- modules/tpcc/src/payment.rs | 196 ++++--- modules/tpcc/src/remote.rs | 79 +-- tools/tpcc-runner/src/client.rs | 56 +- tools/tpcc-runner/src/loader.rs | 8 +- .../confirm_item_reservation_reducer.rs | 68 --- .../delivery_progress_procedure.rs | 54 -- ...educer.rs => delivery_progress_reducer.rs} | 44 +- .../fetch_delivery_completions_procedure.rs | 68 --- .../fetch_delivery_completions_reducer.rs | 88 +++ .../module_bindings/load_customers_reducer.rs | 2 +- .../module_bindings/load_districts_reducer.rs | 2 +- .../module_bindings/load_history_reducer.rs | 2 +- .../src/module_bindings/load_items_reducer.rs | 3 +- .../load_new_orders_reducer.rs | 2 +- .../load_order_lines_reducer.rs | 2 +- .../module_bindings/load_orders_reducer.rs | 3 +- .../load_remote_warehouses_reducer.rs | 2 +- .../module_bindings/load_stocks_reducer.rs | 3 +- .../load_warehouses_reducer.rs | 2 +- tools/tpcc-runner/src/module_bindings/mod.rs | 237 ++++++-- .../module_bindings/new_order_procedure.rs | 69 --- .../src/module_bindings/new_order_reducer.rs | 90 +++ .../order_item_and_decrement_stock_reducer.rs | 69 +++ ...input_type.rs => order_item_input_type.rs} | 5 +- ...tput_type.rs => order_item_output_type.rs} | 5 +- .../module_bindings/order_status_procedure.rs | 61 -- .../module_bindings/order_status_reducer.rs | 79 +++ .../src/module_bindings/payment_procedure.rs | 85 --- .../src/module_bindings/payment_reducer.rs | 108 ++++ .../queue_delivery_procedure.rs | 84 --- .../module_bindings/queue_delivery_reducer.rs | 107 ++++ ...reserve_item_for_remote_order_procedure.rs | 55 -- .../module_bindings/reserved_item_log_type.rs | 54 -- .../src/module_bindings/reset_tpcc_reducer.rs | 3 +- ...and_update_customer_for_payment_reducer.rs | 69 +++ .../rollback_item_reservation_reducer.rs | 68 --- .../module_bindings/spacetime_db_uri_type.rs | 45 -- 
.../module_bindings/stock_level_procedure.rs | 60 -- .../module_bindings/stock_level_reducer.rs | 78 +++ 42 files changed, 1300 insertions(+), 1694 deletions(-) delete mode 100644 tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs rename tools/tpcc-runner/src/module_bindings/{set_spacetimedb_uri_reducer.rs => delivery_progress_reducer.rs} (52%) delete mode 100644 tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_reducer.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/new_order_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/new_order_reducer.rs create mode 100644 tools/tpcc-runner/src/module_bindings/order_item_and_decrement_stock_reducer.rs rename tools/tpcc-runner/src/module_bindings/{reserve_item_input_type.rs => order_item_input_type.rs} (82%) rename tools/tpcc-runner/src/module_bindings/{reserve_item_output_type.rs => order_item_output_type.rs} (81%) delete mode 100644 tools/tpcc-runner/src/module_bindings/order_status_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/order_status_reducer.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/payment_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/payment_reducer.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/queue_delivery_reducer.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs create mode 100644 tools/tpcc-runner/src/module_bindings/resolve_and_update_customer_for_payment_reducer.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs 
delete mode 100644 tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs delete mode 100644 tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs create mode 100644 tools/tpcc-runner/src/module_bindings/stock_level_reducer.rs diff --git a/crates/bindings/src/remote_reducer.rs b/crates/bindings/src/remote_reducer.rs index bded8bc5ae7..73bb13c7cd8 100644 --- a/crates/bindings/src/remote_reducer.rs +++ b/crates/bindings/src/remote_reducer.rs @@ -52,16 +52,22 @@ impl core::fmt::Display for RemoteCallError { /// - `reducer_name`: the name of the reducer to invoke (must be valid UTF-8). /// - `args`: BSATN-encoded reducer arguments. /// -/// Returns `Ok(())` when the remote reducer ran and succeeded. +/// Returns `Ok(bytes)` when the remote reducer ran and succeeded, with `bytes` being the reducer's output. /// Returns `Err(RemoteCallError::Failed(msg))` when the reducer ran but returned an error. /// Returns `Err(RemoteCallError::NotFound(msg))` when the database or reducer does not exist. /// Returns `Err(RemoteCallError::Unreachable(msg))` on transport failure (connection refused, timeout, …). -pub fn call_reducer_on_db(database_identity: Identity, reducer_name: &str, args: &[u8]) -> Result<(), RemoteCallError> { +pub fn call_reducer_on_db( + database_identity: Identity, + reducer_name: &str, + args: &[u8], +) -> Result, RemoteCallError> { let identity_bytes = database_identity.to_byte_array(); match spacetimedb_bindings_sys::call_reducer_on_db(identity_bytes, reducer_name, args) { Ok((status, body_source)) => { if status < 300 { - return Ok(()); + let mut out = Vec::new(); + read_bytes_source_into(body_source, &mut out); + return Ok(out); } // Decode the response body as the error message. 
let msg = if body_source == spacetimedb_bindings_sys::raw::BytesSource::INVALID { diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs index afef01c8bcb..b17aaf17758 100644 --- a/modules/tpcc/src/lib.rs +++ b/modules/tpcc/src/lib.rs @@ -1,7 +1,6 @@ use remote::reset_remote_warehouses; use spacetimedb::{ - log_stopwatch::LogStopwatch, procedure, reducer, table, ProcedureContext, ReducerContext, ScheduleAt, - SpacetimeType, Table, Timestamp, + log_stopwatch::LogStopwatch, reducer, table, ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, }; use std::collections::BTreeSet; @@ -435,45 +434,95 @@ pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<() Ok(()) } -#[procedure] +#[reducer] pub fn order_status( - ctx: &mut ProcedureContext, + ctx: &ReducerContext, w_id: u16, d_id: u8, customer: CustomerSelector, ) -> Result { - let start_time = ctx.timestamp; - log::debug!("Starting `order_status` at {start_time:?}"); - let res = ctx.try_with_tx(|tx| order_status_tx(tx, w_id, d_id, &customer)); + let _timer = LogStopwatch::new("order_status"); + + let customer = resolve_customer(ctx, w_id, d_id, &customer)?; - match &res { - Ok(_) => log::debug!("Succesfully finished `order_status` at {start_time:?}"), - Err(e) => log::error!("Failed `order_status` at {start_time:?}: {e}"), + let mut latest_order: Option = None; + for row in ctx + .db + .oorder() + .by_w_d_c_o_id() + .filter((w_id, d_id, customer.c_id, 0u32..)) + { + latest_order = Some(row); + } + + let mut lines = Vec::new(); + if let Some(order) = &latest_order { + for line in ctx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, order.o_id, 0u8..)) + { + lines.push(OrderStatusLineResult { + item_id: line.ol_i_id, + supply_w_id: line.ol_supply_w_id, + quantity: line.ol_quantity, + amount_cents: line.ol_amount_cents, + delivery_d: line.ol_delivery_d, + }); + } } - res + + Ok(OrderStatusResult { + customer_id: customer.c_id, + customer_first: customer.c_first, + 
customer_middle: customer.c_middle, + customer_last: customer.c_last, + customer_balance_cents: customer.c_balance_cents, + order_id: latest_order.as_ref().map(|row| row.o_id), + order_entry_d: latest_order.as_ref().map(|row| row.o_entry_d), + carrier_id: latest_order.as_ref().and_then(|row| row.o_carrier_id), + lines, + }) } -#[procedure] -pub fn stock_level( - ctx: &mut ProcedureContext, - w_id: u16, - d_id: u8, - threshold: i32, -) -> Result { - let start_time = ctx.timestamp; - log::debug!("Starting `stock_level` at {start_time:?}"); - let res = ctx.try_with_tx(|tx| stock_level_tx(tx, w_id, d_id, threshold)); - - match &res { - Ok(_) => log::debug!("Succesfully finished `stock_level` at {start_time:?}"), - Err(e) => log::error!("Failed `stock_level` at {start_time:?}: {e}"), +#[reducer] +pub fn stock_level(ctx: &ReducerContext, w_id: u16, d_id: u8, threshold: i32) -> Result { + let _timer = LogStopwatch::new("stock_level"); + + let district = find_district(ctx, w_id, d_id)?; + let start_o_id = district.d_next_o_id.saturating_sub(20); + let end_o_id = district.d_next_o_id; + + let mut item_ids = BTreeSet::new(); + for line in ctx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, start_o_id..end_o_id)) + { + item_ids.insert(line.ol_i_id); } - res + + let mut low_stock_count = 0u32; + for item_id in item_ids { + let stock = find_stock(ctx, w_id, item_id)?; + if stock.s_quantity < threshold { + low_stock_count += 1; + } + } + + Ok(StockLevelResult { + warehouse_id: w_id, + district_id: d_id, + threshold, + low_stock_count, + }) } -#[procedure] +#[reducer] pub fn queue_delivery( - ctx: &mut ProcedureContext, + ctx: &ReducerContext, run_id: String, driver_id: String, terminal_id: u32, @@ -481,103 +530,72 @@ pub fn queue_delivery( w_id: u16, carrier_id: u8, ) -> Result { + let _timer = LogStopwatch::new("queue_delivery"); + let queued_at = ctx.timestamp; - log::debug!("Starting `queue_delivery` at {queued_at:?}"); - let res = ctx.try_with_tx(|tx| { - 
ensure_warehouse_exists(tx, w_id)?; - ensure!((1..=10).contains(&carrier_id), "carrier_id must be in the range 1..=10"); - - let job = tx.db.delivery_job().insert(DeliveryJob { - scheduled_id: 0, - scheduled_at: queued_at.into(), - run_id: run_id.clone(), - driver_id: driver_id.clone(), - terminal_id, - request_id, - queued_at, - w_id, - carrier_id, - next_d_id: 1, - skipped_districts: 0, - processed_districts: 0, - }); - Ok(DeliveryQueueAck { - scheduled_id: job.scheduled_id, - queued_at, - warehouse_id: w_id, - carrier_id, - }) + ensure_warehouse_exists(ctx, w_id)?; + ensure!((1..=10).contains(&carrier_id), "carrier_id must be in the range 1..=10"); + + let job = ctx.db.delivery_job().insert(DeliveryJob { + scheduled_id: 0, + scheduled_at: queued_at.into(), + run_id: run_id.clone(), + driver_id: driver_id.clone(), + terminal_id, + request_id, + queued_at, + w_id, + carrier_id, + next_d_id: 1, + skipped_districts: 0, + processed_districts: 0, }); - match &res { - Ok(_) => log::debug!("Succesfully finished `queue_delivery` at {queued_at:?}"), - Err(e) => log::error!("Failed `queue_delivery` at {queued_at:?}: {e}"), - } - res + Ok(DeliveryQueueAck { + scheduled_id: job.scheduled_id, + queued_at, + warehouse_id: w_id, + carrier_id, + }) } -#[procedure] -pub fn delivery_progress(ctx: &mut ProcedureContext, run_id: String) -> Result { - let start_time = ctx.timestamp; - log::debug!("Starting `delivery_progress` at {start_time:?}"); - let res = ctx.try_with_tx(|tx| { - let pending_jobs = tx.db.delivery_job().by_run_id().filter(&run_id).count() as u64; - let completed_jobs = tx - .db - .delivery_completion() - .by_run_completion() - .filter((&run_id, 0u64..)) - .count() as u64; - Ok(DeliveryProgress { - run_id: run_id.clone(), - pending_jobs, - completed_jobs, - }) - }); - - match &res { - Ok(_) => { - log::debug!("Successfully finished `delivery_progress` at {start_time:?}"); - } - Err(e) => { - log::error!("Failed `delivery_progress` at {start_time:?}: {e}"); - } - } - 
res +#[reducer] +pub fn delivery_progress(ctx: &ReducerContext, run_id: String) -> Result { + let _timer = LogStopwatch::new("delivery_progress"); + let pending_jobs = ctx.db.delivery_job().by_run_id().filter(&run_id).count() as u64; + let completed_jobs = ctx + .db + .delivery_completion() + .by_run_completion() + .filter((&run_id, 0u64..)) + .count() as u64; + Ok(DeliveryProgress { + run_id: run_id.clone(), + pending_jobs, + completed_jobs, + }) } -#[procedure] +#[reducer] pub fn fetch_delivery_completions( - ctx: &mut ProcedureContext, + ctx: &ReducerContext, run_id: String, after_completion_id: u64, limit: u32, ) -> Result, String> { - let start_time = ctx.timestamp; - log::debug!("Starting `fetch_delivery_completions` at {start_time:?}"); - let res = ctx.try_with_tx(|tx| { - let limit = limit as usize; - let rows = tx - .db - .delivery_completion() - .by_run_completion() - .filter((&run_id, after_completion_id.saturating_add(1)..)) - .take(limit) - .map(as_delivery_completion_view) - .collect(); - Ok(rows) - }); + let _timer = LogStopwatch::new("fetch_delivery_completions"); - match &res { - Ok(_) => { - log::debug!("Successfully finished `fetch_delivery_completions` at {start_time:?}"); - } - Err(e) => { - log::error!("Failed `fetch_delivery_completions` at {start_time:?}: {e}"); - } - } - res + let limit = limit as usize; + let rows = ctx + .db + .delivery_completion() + .by_run_completion() + .filter((&run_id, after_completion_id.saturating_add(1)..)) + .take(limit) + .map(as_delivery_completion_view) + .collect(); + Ok(rows) } #[reducer] @@ -667,91 +685,6 @@ fn validate_stock_row(row: &Stock) -> Result<(), String> { Ok(()) } -fn order_status_tx( - tx: &spacetimedb::TxContext, - w_id: u16, - d_id: u8, - customer_selector: &CustomerSelector, -) -> Result { - let customer = resolve_customer(tx, w_id, d_id, customer_selector)?; - - let mut latest_order: Option = None; - for row in tx - .db - .oorder() - .by_w_d_c_o_id() - .filter((w_id, d_id, customer.c_id, 
0u32..)) - { - latest_order = Some(row); - } - - let mut lines = Vec::new(); - if let Some(order) = &latest_order { - for line in tx - .db - .order_line() - .by_w_d_o_number() - .filter((w_id, d_id, order.o_id, 0u8..)) - { - lines.push(OrderStatusLineResult { - item_id: line.ol_i_id, - supply_w_id: line.ol_supply_w_id, - quantity: line.ol_quantity, - amount_cents: line.ol_amount_cents, - delivery_d: line.ol_delivery_d, - }); - } - } - - Ok(OrderStatusResult { - customer_id: customer.c_id, - customer_first: customer.c_first, - customer_middle: customer.c_middle, - customer_last: customer.c_last, - customer_balance_cents: customer.c_balance_cents, - order_id: latest_order.as_ref().map(|row| row.o_id), - order_entry_d: latest_order.as_ref().map(|row| row.o_entry_d), - carrier_id: latest_order.as_ref().and_then(|row| row.o_carrier_id), - lines, - }) -} - -fn stock_level_tx( - tx: &spacetimedb::TxContext, - w_id: u16, - d_id: u8, - threshold: i32, -) -> Result { - let district = find_district(tx, w_id, d_id)?; - let start_o_id = district.d_next_o_id.saturating_sub(20); - let end_o_id = district.d_next_o_id; - - let mut item_ids = BTreeSet::new(); - for line in tx - .db - .order_line() - .by_w_d_o_number() - .filter((w_id, d_id, start_o_id..end_o_id)) - { - item_ids.insert(line.ol_i_id); - } - - let mut low_stock_count = 0u32; - for item_id in item_ids { - let stock = find_stock(tx, w_id, item_id)?; - if stock.s_quantity < threshold { - low_stock_count += 1; - } - } - - Ok(StockLevelResult { - warehouse_id: w_id, - district_id: d_id, - threshold, - low_stock_count, - }) -} - fn process_delivery_district( ctx: &ReducerContext, w_id: u16, @@ -803,12 +736,7 @@ fn process_delivery_district( Ok(true) } -fn resolve_customer( - tx: &spacetimedb::TxContext, - w_id: u16, - d_id: u8, - selector: &CustomerSelector, -) -> Result { +fn resolve_customer(tx: &ReducerContext, w_id: u16, d_id: u8, selector: &CustomerSelector) -> Result { match selector { CustomerSelector::ById(id) => 
find_customer_by_id(tx, w_id, d_id, *id), CustomerSelector::ByLastName(last_name) => { @@ -824,7 +752,7 @@ fn resolve_customer( } } -fn find_warehouse(tx: &spacetimedb::TxContext, w_id: u16) -> Result { +fn find_warehouse(tx: &ReducerContext, w_id: u16) -> Result { tx.db .warehouse() .w_id() @@ -832,11 +760,11 @@ fn find_warehouse(tx: &spacetimedb::TxContext, w_id: u16) -> Result Result<(), String> { +fn ensure_warehouse_exists(tx: &ReducerContext, w_id: u16) -> Result<(), String> { find_warehouse(tx, w_id).map(|_| ()) } -fn find_district(tx: &spacetimedb::TxContext, w_id: u16, d_id: u8) -> Result { +fn find_district(tx: &ReducerContext, w_id: u16, d_id: u8) -> Result { tx.db .district() .by_w_d() @@ -896,6 +824,8 @@ fn as_delivery_completion_view(row: DeliveryCompletion) -> DeliveryCompletionVie } mod test { + use spacetimedb::{procedure, ProcedureContext}; + use crate::new_order::{adjust_stock_quantity, pack_order_line_key}; use super::*; diff --git a/modules/tpcc/src/new_order.rs b/modules/tpcc/src/new_order.rs index 07b83ed02aa..931bba95c35 100644 --- a/modules/tpcc/src/new_order.rs +++ b/modules/tpcc/src/new_order.rs @@ -1,13 +1,9 @@ -use spacetimedb::{ - procedure, reducer, table, Identity, ProcedureContext, ReducerContext, SpacetimeType, Table, Timestamp, TxContext, -}; -use spacetimedb_sats::serde::SerdeWrapper; - use crate::{ district, find_customer_by_id, find_district, find_stock, find_warehouse, item, order_line, pack_order_key, - remote::{call_remote_function, get_spacetimedb_uri, remote_warehouse_home}, + remote::{call_remote_reducer, remote_warehouse_home}, stock, District, Item, OrderLine, Stock, WarehouseId, DISTRICTS_PER_WAREHOUSE, TAX_SCALE, }; +use spacetimedb::{log_stopwatch::LogStopwatch, reducer, Identity, ReducerContext, SpacetimeType, Table, Timestamp}; #[derive(Clone, Debug, SpacetimeType)] pub struct NewOrderLineInput { @@ -42,184 +38,45 @@ pub struct NewOrderResult { pub lines: Vec, } -#[procedure] +#[reducer] pub fn new_order( - ctx: 
&mut ProcedureContext, + ctx: &ReducerContext, w_id: WarehouseId, d_id: u8, c_id: u32, order_lines: Vec, ) -> Result { - let start_time = ctx.timestamp; - log::debug!("Starting `new_order` transaction at {start_time:?}"); - - let res = (|| { - ensure!( - (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), - "district id out of range" - ); - ensure!( - (5..=15).contains(&order_lines.len()), - "new-order requires between 5 and 15 order lines" - ); - - // Setup TX: validate warehouse, district, customer ID. - // NON-CONFORMANT: These never change in TPC-C, - // so we don't need to include the checks in the same transaction as the rest of the work. - let (warehouse, district, customer, spacetimedb_uri) = ctx.try_with_tx(|tx| { - let warehouse = find_warehouse(tx, w_id)?; - let district = find_district(tx, w_id, d_id)?; - let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; - let spacetimedb_uri = get_spacetimedb_uri(tx); - Ok::<_, String>((warehouse, district, customer, spacetimedb_uri)) - })?; - - let PartitionedItems { - local_database_items, - remote_database_items, - all_local_warehouse, - } = - // Look up all of the items in the order, and fail if any of them doesn't exist. - // If they all exist, sort them into two groups: - // - `local_database_items`, items in warehouses managed by this database. - // - `remote_database_items`, items in warehouses managed by remote databases. - // Also compute `all_local_warehouse`, which says if all of the items are in the warehouse `w_id`. - // NON-CONFORMANT: This is a separate transaction from the later one, - // which updates stock quantities for the local items and records the new order. - // In a real system, an item might change between the two, but none of the TPC-C transactions writes to items. - // We (ab)use this knowledge to skip compensating for writes to items. 
- partition_local_from_remote_database_items(ctx, w_id, &order_lines)?; - - // NON-CONFORMANT: We reserve items from the remote database extra-transactionally. - // If our TPC-C transaction fails, we'll roll back those reservations. - // This opens us up to dirty read isolation hazards, - // where a concurrent transaction may observe a change in stock quantity that later rolls back. - // This will never happen with only the TPC-C transactions, - // as stock quantity is only written by the `new_order` transaction, - // and `new_order` can only fail prior to updating the stock quantity, due to non-existent items. - // We (ab)use this knowledge to skip compensating for rollbacks to prevent dirty reads. - let remote_item_reservations = reserve_remote_items(ctx, &spacetimedb_uri, d_id, &remote_database_items)?; - - match ctx.try_with_tx(|tx| { - let district = tx - .db - .district() - .district_key() - .find(district.district_key) - .expect("District should not have been removed since we retrieved it last"); - let order_id = district.d_next_o_id; - tx.db.district().district_key().update(District { - d_next_o_id: order_id + 1, - ..district - }); - - let line_results = local_database_items - .iter() - .map(|local_item| claim_stock_for_local_database_item(tx, local_item, d_id)) - .chain(remote_database_items.iter().zip(remote_item_reservations.iter()).map( - |(remote_item, reserved_item)| remote_item_to_processed_new_order_item(remote_item, reserved_item), - )) - .map(|processed_item| insert_order_line(tx, w_id, d_id, order_id, processed_item)) - .collect::>(); - - let subtotal_cents = line_results.iter().map(|line_result| line_result.amount_cents).sum(); - - let taxed = apply_tax( - subtotal_cents, - i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), - ); - let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); - - Ok(NewOrderResult { - warehouse_tax_bps: warehouse.w_tax_bps, - district_tax_bps: district.d_tax_bps, - 
customer_discount_bps: customer.c_discount_bps, - customer_last: customer.c_last.clone(), - customer_credit: customer.c_credit.clone(), - order_id, - entry_d: tx.timestamp, - total_amount_cents, - all_local: all_local_warehouse, - lines: line_results, - }) - }) { - Ok(result) => { - confirm_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - &remote_database_items, - remote_item_reservations, - ); - Ok(result) - } - Err(e) => { - rollback_all_remote_item_reservations( - ctx, - &spacetimedb_uri, - &remote_database_items, - remote_item_reservations, - ); - Err(e) - } - } - })(); - - match &res { - Ok(_) => { - log::debug!("Successfully finished `new_order` at {start_time:?}"); - } - Err(e) => { - log::error!("Failed `new_order` at {start_time:?}: {e}"); - } - } - res -} - -struct LocalDatabaseItem { - idx: usize, - line: NewOrderLineInput, - item: Item, - is_remote_warehouse: bool, -} - -struct RemoteDatabaseItem { - idx: usize, - line: NewOrderLineInput, - item: Item, - remote_database_identity: Identity, -} + let _timer = LogStopwatch::new("new_order"); + log::debug!("Starting `new_order` transaction at {}", ctx.timestamp); + + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), + "district id out of range" + ); + ensure!( + (5..=15).contains(&order_lines.len()), + "new-order requires between 5 and 15 order lines" + ); + + let warehouse = find_warehouse(ctx, w_id)?; + + let district = find_district(ctx, w_id, d_id)?; + let order_id = district.d_next_o_id; + ctx.db.district().district_key().update(District { + d_next_o_id: order_id + 1, + ..district + }); -struct PartitionedItems { - local_database_items: Vec, - remote_database_items: Vec, + let customer = find_customer_by_id(ctx, w_id, d_id, c_id)?; - /// Are all items from the same warehouse as the requesting terminal? - /// - /// Note that this may be false even if all items are partitioned into [`Self::local_database_items`], - /// as we may manage multiple warehouses with a single database. 
- all_local_warehouse: bool, -} + let all_local_warehouse = order_lines.iter().all(|order_line| order_line.supply_w_id == w_id); -fn partition_local_from_remote_database_items( - ctx: &mut ProcedureContext, - local_warehouse_id: WarehouseId, - order_lines: &[NewOrderLineInput], -) -> Result { - ctx.try_with_tx(|tx| { - let mut local_database_items: Vec = Vec::with_capacity(order_lines.len()); - let mut remote_database_items: Vec = Vec::with_capacity(order_lines.len()); - - // Whether this order applies only to a single warehouse. - // This may be `false` even when `remote_database_items_to_get` is non-empty, - // as we may run multiple warehouses from the same database. - let mut all_local_warehouse = true; - - for (idx, line) in order_lines.iter().enumerate() { + let line_results = order_lines + .into_iter() + .enumerate() + .map(|(idx, line)| { ensure!(line.quantity > 0, "order line quantity must be positive"); - let is_remote_warehouse = line.supply_w_id == local_warehouse_id; - all_local_warehouse &= !is_remote_warehouse; - // TECHNICALLY NON-CONFORMANT: If we encounter a non-existent item in the order, // we'll short-circuit and exit here. // TPC-C technically requires, in 2.4.2.3, that we still retrieve and process all the valid item numbers. @@ -228,134 +85,66 @@ fn partition_local_from_remote_database_items( // - changing the execution of other steps // - using a different type of transaction // But we do skip inspecting some number of valid items and stocks. - let item = find_item(tx, line.item_id)?; - - match remote_warehouse_home(tx, line.supply_w_id) { - None => { - // Warehouse is local to this database. - // We'll actually "process" the items, i.e. decrement the stock and sum the order price, - // after we look up and process all the remote items. 
- local_database_items.push(LocalDatabaseItem { - idx, - line: line.clone(), - item, - is_remote_warehouse, - }); - } - Some(remote_database_identity) => { - // Warehouse is on another database; we'll have to do a remote request. - // This is *really* non-conformant. - // TODO(docs): link to blog post justifying this. - remote_database_items.push(RemoteDatabaseItem { - idx, - line: line.clone(), - item, - remote_database_identity, - }); - } - } - } + let item = find_item(ctx, line.item_id)?; - Ok(PartitionedItems { - local_database_items, - remote_database_items, - all_local_warehouse, - }) - }) -} + let is_remote_warehouse = w_id == line.supply_w_id; + let supply_warehouse_id = line.supply_w_id; -fn reserve_remote_items( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, - district_id: u8, - remote_database_items: &[RemoteDatabaseItem], -) -> Result, String> { - let mut remote_item_reservations: Vec = Vec::with_capacity(remote_database_items.len()); + let input = OrderItemInput { + line: line.clone(), + district: d_id, + is_remote_warehouse, + }; - for RemoteDatabaseItem { - line, - remote_database_identity, - .. 
- } in remote_database_items - { - match call_remote_function( - ctx, - spacetimedb_uri, - *remote_database_identity, - "reserve_item_for_remote_order", - ReserveItemInput { - line: NewOrderLineInput::clone(line), - district: district_id, - }, - ) { - Err(e) => { - rollback_all_remote_item_reservations( - ctx, - spacetimedb_uri, - remote_database_items, - remote_item_reservations, - ); - return Err(format!("Error reserving remote item: {e}")); - } - Ok(body) => { - let body = body.into_string().expect("Body should be valid UTF-8"); - let res: SerdeWrapper> = - serde_json::from_str(&body).expect("Response does not conform to expected schema"); - match res.0 { - Err(e) => { - rollback_all_remote_item_reservations( - ctx, - spacetimedb_uri, - remote_database_items, - remote_item_reservations, - ); - return Err(format!("Error reserving remote item from database: {e}")); - } - Ok(output) => remote_item_reservations.push(output), + let order_item_output = match remote_warehouse_home(ctx, supply_warehouse_id) { + None => order_item_and_decrement_stock(ctx, input)?, + Some(remote_database_identity) => { + call_remote_order_item_and_decrement_stock(ctx, remote_database_identity, input)? 
} - } - }; - } - - Ok(remote_item_reservations) -} - -fn rollback_all_remote_item_reservations( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, - remote_items: &[RemoteDatabaseItem], - reservations: Vec, -) { - for (remote_item, reservation) in remote_items.iter().zip(reservations.into_iter()) { - if let Err(e) = call_remote_function( - ctx, - spacetimedb_uri, - remote_item.remote_database_identity, - "rollback_item_reservation", - reservation.rollback_token, - ) { - log::error!("Error rollinb back item reservation: {e}"); - } - } + }; + + Ok(ProcessedNewOrderItem { + idx, + line, + item, + district_stock_info: order_item_output.s_dist, + stock_data: order_item_output.s_data, + updated_quantity: order_item_output.updated_quantity, + }) + }) + .map(|processed_item| { + processed_item.map(|processed_item| insert_order_line(ctx, w_id, d_id, order_id, processed_item)) + }) + .collect::, String>>()?; + + let subtotal_cents = line_results.iter().map(|line_result| line_result.amount_cents).sum(); + + let taxed = apply_tax( + subtotal_cents, + i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), + ); + let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); + + Ok(NewOrderResult { + warehouse_tax_bps: warehouse.w_tax_bps, + district_tax_bps: district.d_tax_bps, + customer_discount_bps: customer.c_discount_bps, + customer_last: customer.c_last.clone(), + customer_credit: customer.c_credit.clone(), + order_id, + entry_d: ctx.timestamp, + total_amount_cents, + all_local: all_local_warehouse, + lines: line_results, + }) } -fn confirm_all_remote_item_reservations( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, - remote_items: &[RemoteDatabaseItem], - reservations: Vec, -) { - for (remote_item, reservation) in remote_items.iter().zip(reservations.into_iter()) { - if let Err(e) = call_remote_function( - ctx, - spacetimedb_uri, - remote_item.remote_database_identity, - "confirm_item_reservation", - reservation.rollback_token, - 
) { - log::error!("Error confirming item reservation: {e}"); - } - } +fn call_remote_order_item_and_decrement_stock( + ctx: &ReducerContext, + remote_database_identity: Identity, + input: OrderItemInput, +) -> Result { + call_remote_reducer(ctx, remote_database_identity, "order_item_and_decrement_stock", &input) } struct ProcessedNewOrderItem { @@ -367,48 +156,8 @@ struct ProcessedNewOrderItem { updated_quantity: i32, } -fn claim_stock_for_local_database_item( - tx: &TxContext, - local_item: &LocalDatabaseItem, - district_id: u8, -) -> ProcessedNewOrderItem { - let stock = - find_stock(tx, local_item.line.supply_w_id, local_item.line.item_id).expect("Stock should exist for all items"); - let updated_quantity = adjust_stock_quantity(stock.s_quantity, local_item.line.quantity as i32); - tx.db.stock().stock_key().update(Stock { - s_quantity: updated_quantity, - s_ytd: stock.s_ytd + local_item.line.quantity as u64, - s_order_cnt: stock.s_order_cnt + 1, - s_remote_cnt: stock.s_remote_cnt + u32::from(local_item.is_remote_warehouse), - ..stock.clone() - }); - - ProcessedNewOrderItem { - idx: local_item.idx, - line: local_item.line.clone(), - item: local_item.item.clone(), - district_stock_info: district_stock_info(&stock, district_id), - stock_data: stock.s_data.clone(), - updated_quantity, - } -} - -fn remote_item_to_processed_new_order_item( - remote_item: &RemoteDatabaseItem, - reserved_item: &ReserveItemOutput, -) -> ProcessedNewOrderItem { - ProcessedNewOrderItem { - idx: remote_item.idx, - line: remote_item.line.clone(), - item: remote_item.item.clone(), - district_stock_info: reserved_item.s_dist.clone(), - stock_data: reserved_item.s_data.clone(), - updated_quantity: reserved_item.updated_quantity, - } -} - fn insert_order_line( - tx: &TxContext, + tx: &ReducerContext, warehouse_id: WarehouseId, district_id: u8, order_id: u32, @@ -455,89 +204,47 @@ fn insert_order_line( } #[derive(SpacetimeType)] -pub struct ReserveItemOutput { +pub struct OrderItemOutput { 
s_dist: String, s_data: String, updated_quantity: i32, - rollback_token: u64, -} - -#[table(accessor = reserved_item_log)] -pub struct ReservedItemLog { - #[primary_key] - #[auto_inc] - rollback_token: u64, - line: NewOrderLineInput, } #[derive(SpacetimeType)] -pub struct ReserveItemInput { +pub struct OrderItemInput { line: NewOrderLineInput, district: u8, -} - -#[procedure] -pub fn reserve_item_for_remote_order( - ctx: &mut ProcedureContext, - input: ReserveItemInput, -) -> Result { - let ReserveItemInput { line, district } = input; - ctx.try_with_tx(|tx| { - let stock = find_stock(tx, line.supply_w_id, line.item_id)?; - - let ReservedItemLog { rollback_token, .. } = tx.db.reserved_item_log().insert(ReservedItemLog { - rollback_token: 0, - line: line.clone(), - }); - - let reserved_quantity = line.quantity; - let updated_quantity = adjust_stock_quantity(stock.s_quantity, reserved_quantity as i32); - - let reserved = ReserveItemOutput { - s_dist: district_stock_info(&stock, district), - s_data: stock.s_data.clone(), - updated_quantity, - rollback_token, - }; - - tx.db.stock().stock_key().update(Stock { - s_quantity: updated_quantity, - s_ytd: stock.s_ytd + u64::from(reserved_quantity), - s_order_cnt: stock.s_order_cnt + 1, - // This must be an order from a remote warehouse, it's coming from a whole different database. - s_remote_cnt: stock.s_remote_cnt + 1, - ..stock - }); - - Ok(reserved) - }) + is_remote_warehouse: bool, } #[reducer] -pub fn rollback_item_reservation(ctx: &ReducerContext, rollback_token: u64) -> Result<(), String> { - let line = ctx - .db - .reserved_item_log() - .rollback_token() - .find(rollback_token) - .ok_or_else(|| format!("No such rollback token: {rollback_token}"))? 
- .line; +pub fn order_item_and_decrement_stock(ctx: &ReducerContext, input: OrderItemInput) -> Result { + let _timer = LogStopwatch::new("order_item_and_decrement_stock"); + let OrderItemInput { + line, + district, + is_remote_warehouse, + } = input; let stock = find_stock(ctx, line.supply_w_id, line.item_id)?; - let quantity = line.quantity; + + let ordered_quantity = line.quantity; + let updated_quantity = adjust_stock_quantity(stock.s_quantity, ordered_quantity as i32); + + let output = OrderItemOutput { + s_dist: district_stock_info(&stock, district), + s_data: stock.s_data.clone(), + updated_quantity, + }; + ctx.db.stock().stock_key().update(Stock { - s_quantity: reverse_stock_quantity(stock.s_quantity, quantity as i32), - s_ytd: stock.s_ytd - line.quantity as u64, - s_order_cnt: stock.s_order_cnt - 1, - s_remote_cnt: stock.s_remote_cnt - 1, + s_quantity: updated_quantity, + s_ytd: stock.s_ytd + u64::from(ordered_quantity), + s_order_cnt: stock.s_order_cnt + 1, + s_remote_cnt: stock.s_remote_cnt + is_remote_warehouse as u32, ..stock }); - ctx.db.reserved_item_log().rollback_token().delete(rollback_token); - Ok(()) -} -#[reducer] -pub fn confirm_item_reservation(ctx: &ReducerContext, rollback_token: u64) { - ctx.db.reserved_item_log().rollback_token().delete(rollback_token); + Ok(output) } fn apply_tax(amount_cents: i64, total_tax_bps: i64) -> i64 { @@ -548,7 +255,7 @@ fn apply_discount(amount_cents: i64, discount_bps: i64) -> i64 { amount_cents * (TAX_SCALE - discount_bps) / TAX_SCALE } -fn find_item(tx: &spacetimedb::TxContext, item_id: u32) -> Result { +fn find_item(tx: &ReducerContext, item_id: u32) -> Result { tx.db .item() .i_id() @@ -567,18 +274,6 @@ pub fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i3 } } -/// NON-CONFORMANT: we're abusing the fact that TPC-C updates stock quantities in a predictable way -/// which is both commutative and associative to be able to roll back stock reservations. 
-fn reverse_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { - assert!(ordered_quantity >= 1); - assert!(ordered_quantity <= 10); - if current_quantity + ordered_quantity >= 91 { - current_quantity + ordered_quantity - 91 - } else { - current_quantity + ordered_quantity - } -} - fn district_stock_info(stock: &Stock, d_id: u8) -> String { match d_id { 1 => stock.s_dist_01.clone(), diff --git a/modules/tpcc/src/payment.rs b/modules/tpcc/src/payment.rs index ea6add08b5c..97e1347ebf7 100644 --- a/modules/tpcc/src/payment.rs +++ b/modules/tpcc/src/payment.rs @@ -1,11 +1,12 @@ -use spacetimedb::{procedure, ProcedureContext, SpacetimeType, Table, Timestamp, TxContext}; -use spacetimedb_sats::serde::SerdeWrapper; - use crate::{ customer, district, find_district, find_warehouse, history, - remote::{call_remote_function, get_spacetimedb_uri, remote_warehouse_home}, + remote::{call_remote_reducer, remote_warehouse_home}, resolve_customer, warehouse, Customer, CustomerSelector, District, History, Warehouse, WarehouseId, MAX_C_DATA_LEN, }; +use spacetimedb::{ + log_stopwatch::LogStopwatch, procedure, reducer, Identity, ProcedureContext, ReducerContext, SpacetimeType, Table, + Timestamp, +}; #[derive(Clone, Debug, SpacetimeType)] pub struct PaymentResult { @@ -22,115 +23,81 @@ pub struct PaymentResult { pub customer_data: Option, } -#[procedure] -pub fn payment( - ctx: &mut ProcedureContext, - w_id: u16, +#[reducer] +fn payment( + ctx: &ReducerContext, + w_id: WarehouseId, d_id: u8, - c_w_id: u16, + c_w_id: WarehouseId, c_d_id: u8, customer: CustomerSelector, payment_amount_cents: i64, ) -> Result { - let now = ctx.timestamp; + let _timer = LogStopwatch::new("payment"); - let res = (|| { - let (warehouse_home, spacetimedb_uri) = - ctx.with_tx(|tx| (remote_warehouse_home(tx, c_w_id), get_spacetimedb_uri(tx))); - let payment_request = PaymentRequest { - terminal_warehouse_id: w_id, - terminal_district_id: d_id, - customer_warehouse_id: c_w_id, - 
customer_district_id: c_d_id, - customer_selector: customer, - payment_amount_cents, - now, - }; - let customer = match warehouse_home { - None => { - // Customer warehouse is managed by this database. - ctx.try_with_tx(|tx| { - let customer = resolve_customer(tx, c_w_id, c_d_id, &payment_request.customer_selector)?; - Ok::<_, String>(update_customer(tx, &payment_request, customer)) - })? - } - Some(remote_database) => { - // Customer warehouse is managed by a remote database. - // Contact them to update the customer's balance and retrieve their info. - let body = call_remote_function( - ctx, - &spacetimedb_uri, - remote_database, - "process_remote_payment", - payment_request.clone(), - )? - .into_string() - .expect("Body should be valid UTF-8"); - let res: SerdeWrapper> = - serde_json::from_str(&body).expect("Response does not conform to expected schema"); - res.0? - } - }; + let payment_request = PaymentRequest { + terminal_warehouse_id: w_id, + terminal_district_id: d_id, + customer_warehouse_id: c_w_id, + customer_district_id: c_d_id, + customer_selector: customer, + payment_amount_cents, + now: ctx.timestamp, + }; - ctx.try_with_tx(|tx| { - let warehouse = find_warehouse(tx, payment_request.terminal_warehouse_id)?; - let district = find_district( - tx, - payment_request.terminal_warehouse_id, - payment_request.terminal_district_id, - )?; + let customer = match remote_warehouse_home(ctx, c_w_id) { + None => resolve_and_update_customer_for_payment(ctx, payment_request.clone())?, + Some(remote_database_identity) => { + call_remote_resolve_and_update_customer_for_payment(ctx, remote_database_identity, &payment_request)? 
+ } + }; - tx.db.warehouse().w_id().update(Warehouse { - w_ytd_cents: warehouse.w_ytd_cents + payment_request.payment_amount_cents, - ..warehouse.clone() - }); + let warehouse = find_warehouse(ctx, payment_request.terminal_warehouse_id)?; + let district = find_district( + ctx, + payment_request.terminal_warehouse_id, + payment_request.terminal_district_id, + )?; - tx.db.district().district_key().update(District { - d_ytd_cents: district.d_ytd_cents + payment_request.payment_amount_cents, - ..district.clone() - }); + ctx.db.warehouse().w_id().update(Warehouse { + w_ytd_cents: warehouse.w_ytd_cents + payment_request.payment_amount_cents, + ..warehouse.clone() + }); - tx.db.history().insert(History { - history_id: 0, - h_c_id: customer.c_id, - h_c_d_id: customer.c_d_id, - h_c_w_id: customer.c_w_id, - h_d_id: payment_request.terminal_district_id, - h_w_id: payment_request.terminal_warehouse_id, - h_date: payment_request.now, - h_amount_cents: payment_request.payment_amount_cents, - h_data: format!("{} {}", warehouse.w_name, district.d_name), - }); + ctx.db.district().district_key().update(District { + d_ytd_cents: district.d_ytd_cents + payment_request.payment_amount_cents, + ..district.clone() + }); - Ok(PaymentResult { - warehouse_name: warehouse.w_name, - district_name: district.d_name, - customer_id: customer.c_id, - customer_first: customer.c_first.clone(), - customer_middle: customer.c_middle.clone(), - customer_last: customer.c_last.clone(), - customer_balance_cents: customer.c_balance_cents, - customer_credit: customer.c_credit.clone(), - customer_discount_bps: customer.c_discount_bps, - payment_amount_cents: payment_request.payment_amount_cents, - customer_data: if customer.c_credit == "BC" { - Some(customer.c_data.clone()) - } else { - None - }, - }) - }) - })(); + ctx.db.history().insert(History { + history_id: 0, + h_c_id: customer.c_id, + h_c_d_id: customer.c_d_id, + h_c_w_id: customer.c_w_id, + h_d_id: payment_request.terminal_district_id, + h_w_id: 
payment_request.terminal_warehouse_id, + h_date: payment_request.now, + h_amount_cents: payment_request.payment_amount_cents, + h_data: format!("{} {}", warehouse.w_name, district.d_name), + }); - match &res { - Ok(_) => { - log::debug!("Successfully finished `payment` at {now:?}"); - } - Err(e) => { - log::error!("Failed `payment` at {now:?}: {e}"); - } - } - res + Ok(PaymentResult { + warehouse_name: warehouse.w_name, + district_name: district.d_name, + customer_id: customer.c_id, + customer_first: customer.c_first.clone(), + customer_middle: customer.c_middle.clone(), + customer_last: customer.c_last.clone(), + customer_balance_cents: customer.c_balance_cents, + customer_credit: customer.c_credit.clone(), + customer_discount_bps: customer.c_discount_bps, + payment_amount_cents: payment_request.payment_amount_cents, + customer_data: if customer.c_credit == "BC" { + Some(customer.c_data.clone()) + } else { + None + }, + }) } #[derive(SpacetimeType, Clone)] @@ -144,6 +111,31 @@ struct PaymentRequest { now: Timestamp, } +#[reducer] +fn resolve_and_update_customer_for_payment(ctx: &ReducerContext, request: PaymentRequest) -> Result { + let _timer = LogStopwatch::new("resolve_and_update_customer_for_payment"); + let customer = resolve_customer( + ctx, + request.customer_warehouse_id, + request.customer_district_id, + &request.customer_selector, + )?; + Ok(update_customer(ctx, &request, customer)) +} + +fn call_remote_resolve_and_update_customer_for_payment( + ctx: &ReducerContext, + remote_database_identity: Identity, + request: &PaymentRequest, +) -> Result { + call_remote_reducer( + ctx, + remote_database_identity, + "resolve_and_update_customer_for_payment", + request, + ) +} + #[procedure] fn process_remote_payment(ctx: &mut ProcedureContext, request: PaymentRequest) -> Result { ctx.try_with_tx(|tx| { @@ -157,7 +149,7 @@ fn process_remote_payment(ctx: &mut ProcedureContext, request: PaymentRequest) - }) } -fn update_customer(tx: &TxContext, request: 
&PaymentRequest, customer: Customer) -> Customer { +fn update_customer(tx: &ReducerContext, request: &PaymentRequest, customer: Customer) -> Customer { let mut updated_customer = Customer { c_balance_cents: customer.c_balance_cents - request.payment_amount_cents, c_ytd_payment_cents: customer.c_ytd_payment_cents + request.payment_amount_cents, diff --git a/modules/tpcc/src/remote.rs b/modules/tpcc/src/remote.rs index bea5a610852..68a183d8924 100644 --- a/modules/tpcc/src/remote.rs +++ b/modules/tpcc/src/remote.rs @@ -1,31 +1,11 @@ -use std::time::Duration; - -use http::Request; use spacetimedb::{ - http::Timeout, reducer, table, Identity, ProcedureContext, ReducerContext, Serialize, Table, TimeDuration, - TxContext, + reducer, remote_reducer::call_reducer_on_db, table, DeserializeOwned, Identity, ReducerContext, Serialize, + SpacetimeType, Table, }; use spacetimedb_sats::bsatn; use crate::WarehouseId; -#[table(accessor = spacetimedb_uri)] -struct SpacetimeDbUri { - uri: String, -} - -#[reducer] -fn set_spacetimedb_uri(ctx: &ReducerContext, uri: String) { - for row in ctx.db.spacetimedb_uri().iter() { - ctx.db.spacetimedb_uri().delete(row); - } - ctx.db.spacetimedb_uri().insert(SpacetimeDbUri { uri }); -} - -pub fn get_spacetimedb_uri(tx: &TxContext) -> String { - tx.db.spacetimedb_uri().iter().next().unwrap().uri -} - /// For warehouses not managed by this database, stores the [`Identity`] of the remote database which manages that warehouse. /// /// Will not have a row present for a warehouse managed by the local database. 
@@ -58,41 +38,24 @@ pub fn remote_warehouse_home(ctx: &ReducerContext, warehouse_id: WarehouseId) -> .map(|row| row.remote_database_home) } -pub fn call_remote_function( - ctx: &mut ProcedureContext, - spacetimedb_uri: &str, +pub fn call_remote_reducer( + _ctx: &ReducerContext, database_ident: Identity, - function_name: &str, - arguments: impl Serialize, -) -> Result { - let request = Request::builder() - .uri(format!( - "{spacetimedb_uri}/v1/database/{database_ident}/call/{function_name}" - )) - .method("POST") - .header("Content-Type", "application/octet-stream") - // This absurdly long timeout will be clamped by the host to 3 minutes. - .extension(Timeout::from(TimeDuration::from_duration(Duration::from_hours(1)))) - // TODO(auth): include a token. - .body(bsatn::to_vec(&arguments).map_err(|e| format!("Failed to BSATN-serialize arguments: {e}"))?) - .map_err(|e| format!("Error constructing `Request`: {e}"))?; - log::debug!("Sending remote request to run {function_name} on {spacetimedb_uri} / {database_ident}"); - match ctx.http.send(request) { - Err(e) => { - let msg = format!("Error sending request to remote database {database_ident} at URI {spacetimedb_uri} to call {function_name}: {e}"); - log::error!("{}", msg); - Err(msg) - } - Ok(response) if response.status() != http::status::StatusCode::OK => { - let msg = format!("Got non-200 response code {} from request to remote database {database_ident} at URI {spacetimedb_uri} when calling {function_name}: {}", response.status(), response.into_body().into_string_lossy()); - log::error!("{}", msg); - Err(msg) - } - Ok(response) => { - log::debug!( - "Got successful response from {spacetimedb_uri} / {database_ident} when running {function_name}" - ); - Ok(response.into_body()) - } - } + reducer_name: &str, + args: &Args, +) -> Result +where + Args: SpacetimeType + Serialize, + Output: SpacetimeType + DeserializeOwned, +{ + let args = bsatn::to_vec(args).map_err(|e| { + format!("Failed to BSATN-serialize args for 
remote reducer {reducer_name} on database {database_ident}: {e}") + })?; + let out = call_reducer_on_db(database_ident, reducer_name, &args) + .map_err(|e| format!("Failed to call remote reducer {reducer_name} on database {database_ident}: {e}"))?; + bsatn::from_slice(&out).map_err(|e| { + format!( + "Failed to BSATN-deserialize result from remote reducer {reducer_name} on database {database_ident}: {e}" + ) + }) } diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs index ef58bc51c6f..45de00c1232 100644 --- a/tools/tpcc-runner/src/client.rs +++ b/tools/tpcc-runner/src/client.rs @@ -47,22 +47,6 @@ impl ModuleClient { }) } - pub fn set_spacetimedb_uri(&self, uri: &str) -> Result<()> { - let (tx, rx) = sync_channel(1); - self.conn - .reducers - .set_spacetimedb_uri_then(uri.to_string(), move |_, res| { - log::debug!("Got response from `set_spacetimedb_uri`: {res:?}"); - let _ = tx.send(res); - })?; - match rx.recv_timeout(self.timeout) { - Ok(Ok(Ok(()))) => Ok(()), - Ok(Ok(Err(message))) => bail!("set_spacetimedb_uri failed: {}", message), - Ok(Err(err)) => Err(anyhow!("set_spacetimedb_uri internal error: {}", err)), - Err(_) => bail!("timed out waiting for set_spacetimedb_uri"), - } - } - pub fn reset_tpcc(&self) -> Result<()> { let (tx, rx) = sync_channel(1); self.conn.reducers.reset_tpcc_then(move |_, res| { @@ -86,14 +70,10 @@ impl ModuleClient { increment_pending(pending); let pending_for_callback = Arc::clone(pending); let errors = Arc::clone(errors); - if let Err(err) = self - .conn - .reducers - .load_remote_warehouses_then(rows, move |_, res| { - handle_reducer_result("load_remote_warehouses", res, &errors); - decrement_pending(&pending_for_callback); - }) - { + if let Err(err) = self.conn.reducers.load_remote_warehouses_then(rows, move |_, res| { + handle_reducer_result("load_remote_warehouses", res, &errors); + decrement_pending(&pending_for_callback); + }) { decrement_pending(pending); return Err(anyhow!("load_remote_warehouses 
send error: {err}")); } @@ -280,11 +260,11 @@ impl ModuleClient { ) -> Result> { let (tx, rx) = sync_channel(1); self.conn - .procedures + .reducers .new_order_then(w_id, d_id, c_id, order_lines, move |_, res| { log::debug!("Got response from `new_order`: {res:?}"); let _ = tx.send(res); - }); + })?; match rx.recv_timeout(self.timeout) { Ok(Ok(value)) => Ok(value), Ok(Err(err)) => Err(anyhow!("new_order internal error: {}", err)), @@ -302,7 +282,7 @@ impl ModuleClient { payment_amount_cents: i64, ) -> Result> { let (tx, rx) = sync_channel(1); - self.conn.procedures.payment_then( + self.conn.reducers.payment_then( w_id, d_id, c_w_id, @@ -313,7 +293,7 @@ impl ModuleClient { log::debug!("Got response from `payment`: {res:?}"); let _ = tx.send(res); }, - ); + )?; match rx.recv_timeout(self.timeout) { Ok(Ok(value)) => Ok(value), Ok(Err(err)) => Err(anyhow!("payment internal error: {}", err)), @@ -329,11 +309,11 @@ impl ModuleClient { ) -> Result> { let (tx, rx) = sync_channel(1); self.conn - .procedures + .reducers .order_status_then(w_id, d_id, customer, move |_, res| { log::debug!("Got response from `order_status`: {res:?}"); let _ = tx.send(res); - }); + })?; match rx.recv_timeout(self.timeout) { Ok(Ok(value)) => Ok(value), Ok(Err(err)) => Err(anyhow!("order_status internal error: {}", err)), @@ -344,11 +324,11 @@ impl ModuleClient { pub fn stock_level(&self, w_id: u16, d_id: u8, threshold: i32) -> Result> { let (tx, rx) = sync_channel(1); self.conn - .procedures + .reducers .stock_level_then(w_id, d_id, threshold, move |_, res| { log::debug!("Got response from `stock_level`: {res:?}"); let _ = tx.send(res); - }); + })?; match rx.recv_timeout(self.timeout) { Ok(Ok(value)) => Ok(value), Ok(Err(err)) => Err(anyhow!("stock_level internal error: {}", err)), @@ -366,7 +346,7 @@ impl ModuleClient { carrier_id: u8, ) -> Result> { let (tx, rx) = sync_channel(1); - self.conn.procedures.queue_delivery_then( + self.conn.reducers.queue_delivery_then( run_id, driver_id, 
terminal_id, @@ -377,7 +357,7 @@ impl ModuleClient { log::debug!("Got response from `queue_delivery`: {res:?}"); let _ = tx.send(res); }, - ); + )?; match rx.recv_timeout(self.timeout) { Ok(Ok(value)) => Ok(value), Ok(Err(err)) => Err(anyhow!("queue_delivery internal error: {}", err)), @@ -387,10 +367,10 @@ impl ModuleClient { pub fn delivery_progress(&self, run_id: String) -> Result> { let (tx, rx) = sync_channel(1); - self.conn.procedures.delivery_progress_then(run_id, move |_, res| { + self.conn.reducers.delivery_progress_then(run_id, move |_, res| { log::debug!("Got response from `delivery_progress`: {res:?}"); let _ = tx.send(res); - }); + })?; match rx.recv_timeout(self.timeout) { Ok(Ok(value)) => Ok(value), Ok(Err(err)) => Err(anyhow!("delivery_progress internal error: {}", err)), @@ -406,11 +386,11 @@ impl ModuleClient { ) -> Result, String>> { let (tx, rx) = sync_channel(1); self.conn - .procedures + .reducers .fetch_delivery_completions_then(run_id, after_completion_id, limit, move |_, res| { log::debug!("Got response from `fetch_delivery_completions`: {res:?}"); let _ = tx.send(res); - }); + })?; match rx.recv_timeout(self.timeout) { Ok(Ok(value)) => Ok(value), Ok(Err(err)) => Err(anyhow!("fetch_delivery_completions internal error: {}", err)), diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index 19b3ab27175..dd16a87d63c 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -83,8 +83,6 @@ fn configure_one_database(config: &LoadConfig, database_number: u16, topology: & client.reset_tpcc().context("failed to reset tpcc data")?; } - client.set_spacetimedb_uri(&config.connection.uri)?; - let mut rng = StdRng::seed_from_u64(0x5eed_5eed); let load_c_last = rng.random_range(0..=255); let base_ts = Timestamp::from(SystemTime::now()); @@ -430,11 +428,7 @@ fn load_customers_history_orders( ol_dist_info: alpha_string(rng, 24, 24), }); if order_line_batch.len() >= batch_size { - 
client.queue_load_order_lines( - std::mem::take(&mut order_line_batch), - &pending, - &errors, - )?; + client.queue_load_order_lines(std::mem::take(&mut order_line_batch), &pending, &errors)?; } } diff --git a/tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs b/tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs deleted file mode 100644 index 1f449f2ff45..00000000000 --- a/tools/tpcc-runner/src/module_bindings/confirm_item_reservation_reducer.rs +++ /dev/null @@ -1,68 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -pub(super) struct ConfirmItemReservationArgs { - pub rollback_token: u64, -} - -impl From for super::Reducer { - fn from(args: ConfirmItemReservationArgs) -> Self { - Self::ConfirmItemReservation { - rollback_token: args.rollback_token, - } - } -} - -impl __sdk::InModule for ConfirmItemReservationArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the reducer `confirm_item_reservation`. -/// -/// Implemented for [`super::RemoteReducers`]. -pub trait confirm_item_reservation { - /// Request that the remote module invoke the reducer `confirm_item_reservation` to run as soon as possible. - /// - /// This method returns immediately, and errors only if we are unable to send the request. - /// The reducer will run asynchronously in the future, - /// and this method provides no way to listen for its completion status. - /// /// Use [`confirm_item_reservation:confirm_item_reservation_then`] to run a callback after the reducer completes. 
- fn confirm_item_reservation(&self, rollback_token: u64) -> __sdk::Result<()> { - self.confirm_item_reservation_then(rollback_token, |_, _| {}) - } - - /// Request that the remote module invoke the reducer `confirm_item_reservation` to run as soon as possible, - /// registering `callback` to run when we are notified that the reducer completed. - /// - /// This method returns immediately, and errors only if we are unable to send the request. - /// The reducer will run asynchronously in the future, - /// and its status can be observed with the `callback`. - fn confirm_item_reservation_then( - &self, - rollback_token: u64, - - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) -> __sdk::Result<()>; -} - -impl confirm_item_reservation for super::RemoteReducers { - fn confirm_item_reservation_then( - &self, - rollback_token: u64, - - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) -> __sdk::Result<()> { - self.imp - .invoke_reducer_with_callback(ConfirmItemReservationArgs { rollback_token }, callback) - } -} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs b/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs deleted file mode 100644 index 697c941b658..00000000000 --- a/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs +++ /dev/null @@ -1,54 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::delivery_progress_type::DeliveryProgress; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct DeliveryProgressArgs { - pub run_id: String, -} - -impl __sdk::InModule for DeliveryProgressArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `delivery_progress`. -/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait delivery_progress { - fn delivery_progress(&self, run_id: String) { - self.delivery_progress_then(run_id, |_, _| {}); - } - - fn delivery_progress_then( - &self, - run_id: String, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ); -} - -impl delivery_progress for super::RemoteProcedures { - fn delivery_progress_then( - &self, - run_id: String, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result>( - "delivery_progress", - DeliveryProgressArgs { run_id }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/set_spacetimedb_uri_reducer.rs b/tools/tpcc-runner/src/module_bindings/delivery_progress_reducer.rs similarity index 52% rename from tools/tpcc-runner/src/module_bindings/set_spacetimedb_uri_reducer.rs rename to tools/tpcc-runner/src/module_bindings/delivery_progress_reducer.rs index 52266e982ff..98849d69764 100644 --- a/tools/tpcc-runner/src/module_bindings/set_spacetimedb_uri_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/delivery_progress_reducer.rs @@ -4,63 +4,65 @@ #![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; +use super::delivery_progress_type::DeliveryProgress; + #[derive(__lib::ser::Serialize, 
__lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub(super) struct SetSpacetimedbUriArgs { - pub uri: String, +pub(super) struct DeliveryProgressArgs { + pub run_id: String, } -impl From for super::Reducer { - fn from(args: SetSpacetimedbUriArgs) -> Self { - Self::SetSpacetimedbUri { uri: args.uri } +impl From for super::Reducer { + fn from(args: DeliveryProgressArgs) -> Self { + Self::DeliveryProgress { run_id: args.run_id } } } -impl __sdk::InModule for SetSpacetimedbUriArgs { +impl __sdk::InModule for DeliveryProgressArgs { type Module = super::RemoteModule; } #[allow(non_camel_case_types)] -/// Extension trait for access to the reducer `set_spacetimedb_uri`. +/// Extension trait for access to the reducer `delivery_progress`. /// /// Implemented for [`super::RemoteReducers`]. -pub trait set_spacetimedb_uri { - /// Request that the remote module invoke the reducer `set_spacetimedb_uri` to run as soon as possible. +pub trait delivery_progress { + /// Request that the remote module invoke the reducer `delivery_progress` to run as soon as possible. /// /// This method returns immediately, and errors only if we are unable to send the request. /// The reducer will run asynchronously in the future, /// and this method provides no way to listen for its completion status. - /// /// Use [`set_spacetimedb_uri:set_spacetimedb_uri_then`] to run a callback after the reducer completes. - fn set_spacetimedb_uri(&self, uri: String) -> __sdk::Result<()> { - self.set_spacetimedb_uri_then(uri, |_, _| {}) + /// /// Use [`delivery_progress:delivery_progress_then`] to run a callback after the reducer completes. 
+ fn delivery_progress(&self, run_id: String) -> __sdk::Result<()> { + self.delivery_progress_then(run_id, |_, _| {}) } - /// Request that the remote module invoke the reducer `set_spacetimedb_uri` to run as soon as possible, + /// Request that the remote module invoke the reducer `delivery_progress` to run as soon as possible, /// registering `callback` to run when we are notified that the reducer completed. /// /// This method returns immediately, and errors only if we are unable to send the request. /// The reducer will run asynchronously in the future, /// and its status can be observed with the `callback`. - fn set_spacetimedb_uri_then( + fn delivery_progress_then( &self, - uri: String, + run_id: String, - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + Send + 'static, ) -> __sdk::Result<()>; } -impl set_spacetimedb_uri for super::RemoteReducers { - fn set_spacetimedb_uri_then( +impl delivery_progress for super::RemoteReducers { + fn delivery_progress_then( &self, - uri: String, + run_id: String, - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + Send + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(SetSpacetimedbUriArgs { uri }, callback) + .invoke_reducer_with_callback::<_, DeliveryProgress>(DeliveryProgressArgs { run_id }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs b/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs deleted file mode 100644 index 23760be1e9a..00000000000 --- a/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs +++ /dev/null @@ -1,68 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. 
MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::delivery_completion_view_type::DeliveryCompletionView; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct FetchDeliveryCompletionsArgs { - pub run_id: String, - pub after_completion_id: u64, - pub limit: u32, -} - -impl __sdk::InModule for FetchDeliveryCompletionsArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `fetch_delivery_completions`. -/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait fetch_delivery_completions { - fn fetch_delivery_completions(&self, run_id: String, after_completion_id: u64, limit: u32) { - self.fetch_delivery_completions_then(run_id, after_completion_id, limit, |_, _| {}); - } - - fn fetch_delivery_completions_then( - &self, - run_id: String, - after_completion_id: u64, - limit: u32, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, String>, __sdk::InternalError>, - ) + Send - + 'static, - ); -} - -impl fetch_delivery_completions for super::RemoteProcedures { - fn fetch_delivery_completions_then( - &self, - run_id: String, - after_completion_id: u64, - limit: u32, - - __callback: impl FnOnce( - &super::ProcedureEventContext, - Result, String>, __sdk::InternalError>, - ) + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result, String>>( - "fetch_delivery_completions", - FetchDeliveryCompletionsArgs { - run_id, - after_completion_id, - limit, - }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_reducer.rs b/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_reducer.rs new file mode 100644 index 00000000000..79d9fc1c326 --- /dev/null +++ 
b/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_reducer.rs @@ -0,0 +1,88 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_completion_view_type::DeliveryCompletionView; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct FetchDeliveryCompletionsArgs { + pub run_id: String, + pub after_completion_id: u64, + pub limit: u32, +} + +impl From for super::Reducer { + fn from(args: FetchDeliveryCompletionsArgs) -> Self { + Self::FetchDeliveryCompletions { + run_id: args.run_id, + after_completion_id: args.after_completion_id, + limit: args.limit, + } + } +} + +impl __sdk::InModule for FetchDeliveryCompletionsArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `fetch_delivery_completions`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait fetch_delivery_completions { + /// Request that the remote module invoke the reducer `fetch_delivery_completions` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`fetch_delivery_completions:fetch_delivery_completions_then`] to run a callback after the reducer completes. 
+ fn fetch_delivery_completions(&self, run_id: String, after_completion_id: u64, limit: u32) -> __sdk::Result<()> { + self.fetch_delivery_completions_then(run_id, after_completion_id, limit, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `fetch_delivery_completions` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn fetch_delivery_completions_then( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + + callback: impl FnOnce( + &super::ReducerEventContext, + Result, String>, __sdk::InternalError>, + ) + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl fetch_delivery_completions for super::RemoteReducers { + fn fetch_delivery_completions_then( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + + callback: impl FnOnce( + &super::ReducerEventContext, + Result, String>, __sdk::InternalError>, + ) + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback::<_, Vec>( + FetchDeliveryCompletionsArgs { + run_id, + after_completion_id, + limit, + }, + callback, + ) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs index 68000e9611b..70a57c376ca 100644 --- a/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs @@ -63,6 +63,6 @@ impl load_customers for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(LoadCustomersArgs { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(LoadCustomersArgs { rows }, callback) } } diff --git 
a/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs index 4d9e6c75cd2..dfa86c9d781 100644 --- a/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs @@ -63,6 +63,6 @@ impl load_districts for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(LoadDistrictsArgs { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(LoadDistrictsArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs index 73517bccebe..21cdfad028e 100644 --- a/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs @@ -63,6 +63,6 @@ impl load_history for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(LoadHistoryArgs { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(LoadHistoryArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs index 7cc306270ae..50e9efd1488 100644 --- a/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs @@ -62,6 +62,7 @@ impl load_items for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(LoadItemsArgs { rows }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(LoadItemsArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs index d79b6550953..affacbc1eae 100644 --- a/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs +++ 
b/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs @@ -63,6 +63,6 @@ impl load_new_orders for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(LoadNewOrdersArgs { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(LoadNewOrdersArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs index 189f862f8a5..17484ad9f46 100644 --- a/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs @@ -63,6 +63,6 @@ impl load_order_lines for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(LoadOrderLinesArgs { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(LoadOrderLinesArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs index a72bb0a9235..90353ffe98f 100644 --- a/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs @@ -62,6 +62,7 @@ impl load_orders for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(LoadOrdersArgs { rows }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(LoadOrdersArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs index f54aa582640..e25f2ee1977 100644 --- a/tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_remote_warehouses_reducer.rs @@ -63,6 +63,6 @@ impl load_remote_warehouses for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - 
.invoke_reducer_with_callback(LoadRemoteWarehousesArgs { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(LoadRemoteWarehousesArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs index 89d3f80bf7b..06e9587d815 100644 --- a/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs @@ -62,6 +62,7 @@ impl load_stocks for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(LoadStocksArgs { rows }, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(LoadStocksArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs index b6986a465b0..2fdae2bd60e 100644 --- a/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs @@ -63,6 +63,6 @@ impl load_warehouses for super::RemoteReducers { + 'static, ) -> __sdk::Result<()> { self.imp - .invoke_reducer_with_callback(LoadWarehousesArgs { rows }, callback) + .invoke_reducer_with_callback::<_, ()>(LoadWarehousesArgs { rows }, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/mod.rs b/tools/tpcc-runner/src/module_bindings/mod.rs index a3d2e2bbdd7..05e883aaf3f 100644 --- a/tools/tpcc-runner/src/module_bindings/mod.rs +++ b/tools/tpcc-runner/src/module_bindings/mod.rs @@ -1,22 +1,21 @@ // THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE // WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. -// This was generated using spacetimedb cli version 2.1.0 (commit 0a24939b80db8d4a743625fb64c7c333eca35479). +// This was generated using spacetimedb cli version 2.1.0 (commit c7550a1c918b90c6bc5c175733e960cee24cb5c2-dirty). 
#![allow(unused, clippy::all)] use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; -pub mod confirm_item_reservation_reducer; pub mod customer_selector_type; pub mod customer_type; pub mod delivery_completion_type; pub mod delivery_completion_view_type; pub mod delivery_job_type; -pub mod delivery_progress_procedure; +pub mod delivery_progress_reducer; pub mod delivery_progress_type; pub mod delivery_queue_ack_type; pub mod district_type; -pub mod fetch_delivery_completions_procedure; +pub mod fetch_delivery_completions_reducer; pub mod history_type; pub mod item_type; pub mod load_customers_reducer; @@ -31,45 +30,41 @@ pub mod load_stocks_reducer; pub mod load_warehouses_reducer; pub mod new_order_line_input_type; pub mod new_order_line_result_type; -pub mod new_order_procedure; +pub mod new_order_reducer; pub mod new_order_result_type; pub mod new_order_type; pub mod o_order_type; +pub mod order_item_and_decrement_stock_reducer; +pub mod order_item_input_type; +pub mod order_item_output_type; pub mod order_line_type; pub mod order_status_line_result_type; -pub mod order_status_procedure; +pub mod order_status_reducer; pub mod order_status_result_type; -pub mod payment_procedure; +pub mod payment_reducer; pub mod payment_request_type; pub mod payment_result_type; pub mod process_remote_payment_procedure; -pub mod queue_delivery_procedure; +pub mod queue_delivery_reducer; pub mod remote_warehouse_type; -pub mod reserve_item_for_remote_order_procedure; -pub mod reserve_item_input_type; -pub mod reserve_item_output_type; -pub mod reserved_item_log_type; pub mod reset_tpcc_reducer; -pub mod rollback_item_reservation_reducer; -pub mod set_spacetimedb_uri_reducer; -pub mod spacetime_db_uri_type; -pub mod stock_level_procedure; +pub mod resolve_and_update_customer_for_payment_reducer; +pub mod stock_level_reducer; pub mod stock_level_result_type; pub mod stock_type; pub mod test_procedure; pub mod warehouse_type; -pub use 
confirm_item_reservation_reducer::confirm_item_reservation; pub use customer_selector_type::CustomerSelector; pub use customer_type::Customer; pub use delivery_completion_type::DeliveryCompletion; pub use delivery_completion_view_type::DeliveryCompletionView; pub use delivery_job_type::DeliveryJob; -pub use delivery_progress_procedure::delivery_progress; +pub use delivery_progress_reducer::delivery_progress; pub use delivery_progress_type::DeliveryProgress; pub use delivery_queue_ack_type::DeliveryQueueAck; pub use district_type::District; -pub use fetch_delivery_completions_procedure::fetch_delivery_completions; +pub use fetch_delivery_completions_reducer::fetch_delivery_completions; pub use history_type::History; pub use item_type::Item; pub use load_customers_reducer::load_customers; @@ -84,29 +79,26 @@ pub use load_stocks_reducer::load_stocks; pub use load_warehouses_reducer::load_warehouses; pub use new_order_line_input_type::NewOrderLineInput; pub use new_order_line_result_type::NewOrderLineResult; -pub use new_order_procedure::new_order; +pub use new_order_reducer::new_order; pub use new_order_result_type::NewOrderResult; pub use new_order_type::NewOrder; pub use o_order_type::OOrder; +pub use order_item_and_decrement_stock_reducer::order_item_and_decrement_stock; +pub use order_item_input_type::OrderItemInput; +pub use order_item_output_type::OrderItemOutput; pub use order_line_type::OrderLine; pub use order_status_line_result_type::OrderStatusLineResult; -pub use order_status_procedure::order_status; +pub use order_status_reducer::order_status; pub use order_status_result_type::OrderStatusResult; -pub use payment_procedure::payment; +pub use payment_reducer::payment; pub use payment_request_type::PaymentRequest; pub use payment_result_type::PaymentResult; pub use process_remote_payment_procedure::process_remote_payment; -pub use queue_delivery_procedure::queue_delivery; +pub use queue_delivery_reducer::queue_delivery; pub use 
remote_warehouse_type::RemoteWarehouse; -pub use reserve_item_for_remote_order_procedure::reserve_item_for_remote_order; -pub use reserve_item_input_type::ReserveItemInput; -pub use reserve_item_output_type::ReserveItemOutput; -pub use reserved_item_log_type::ReservedItemLog; pub use reset_tpcc_reducer::reset_tpcc; -pub use rollback_item_reservation_reducer::rollback_item_reservation; -pub use set_spacetimedb_uri_reducer::set_spacetimedb_uri; -pub use spacetime_db_uri_type::SpacetimeDbUri; -pub use stock_level_procedure::stock_level; +pub use resolve_and_update_customer_for_payment_reducer::resolve_and_update_customer_for_payment; +pub use stock_level_reducer::stock_level; pub use stock_level_result_type::StockLevelResult; pub use stock_type::Stock; pub use test_procedure::test; @@ -120,20 +112,83 @@ pub use warehouse_type::Warehouse; /// to indicate which reducer caused the event. pub enum Reducer { - ConfirmItemReservation { rollback_token: u64 }, - LoadCustomers { rows: Vec }, - LoadDistricts { rows: Vec }, - LoadHistory { rows: Vec }, - LoadItems { rows: Vec }, - LoadNewOrders { rows: Vec }, - LoadOrderLines { rows: Vec }, - LoadOrders { rows: Vec }, - LoadRemoteWarehouses { rows: Vec }, - LoadStocks { rows: Vec }, - LoadWarehouses { rows: Vec }, + DeliveryProgress { + run_id: String, + }, + FetchDeliveryCompletions { + run_id: String, + after_completion_id: u64, + limit: u32, + }, + LoadCustomers { + rows: Vec, + }, + LoadDistricts { + rows: Vec, + }, + LoadHistory { + rows: Vec, + }, + LoadItems { + rows: Vec, + }, + LoadNewOrders { + rows: Vec, + }, + LoadOrderLines { + rows: Vec, + }, + LoadOrders { + rows: Vec, + }, + LoadRemoteWarehouses { + rows: Vec, + }, + LoadStocks { + rows: Vec, + }, + LoadWarehouses { + rows: Vec, + }, + NewOrder { + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + }, + OrderItemAndDecrementStock { + input: OrderItemInput, + }, + OrderStatus { + w_id: u16, + d_id: u8, + customer: CustomerSelector, + }, + Payment { + w_id: 
u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + }, + QueueDelivery { + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + }, ResetTpcc, - RollbackItemReservation { rollback_token: u64 }, - SetSpacetimedbUri { uri: String }, + ResolveAndUpdateCustomerForPayment { + request: PaymentRequest, + }, + StockLevel { + w_id: u16, + d_id: u8, + threshold: i32, + }, } impl __sdk::InModule for Reducer { @@ -143,7 +198,8 @@ impl __sdk::InModule for Reducer { impl __sdk::Reducer for Reducer { fn reducer_name(&self) -> &'static str { match self { - Reducer::ConfirmItemReservation { .. } => "confirm_item_reservation", + Reducer::DeliveryProgress { .. } => "delivery_progress", + Reducer::FetchDeliveryCompletions { .. } => "fetch_delivery_completions", Reducer::LoadCustomers { .. } => "load_customers", Reducer::LoadDistricts { .. } => "load_districts", Reducer::LoadHistory { .. } => "load_history", @@ -154,20 +210,32 @@ impl __sdk::Reducer for Reducer { Reducer::LoadRemoteWarehouses { .. } => "load_remote_warehouses", Reducer::LoadStocks { .. } => "load_stocks", Reducer::LoadWarehouses { .. } => "load_warehouses", + Reducer::NewOrder { .. } => "new_order", + Reducer::OrderItemAndDecrementStock { .. } => "order_item_and_decrement_stock", + Reducer::OrderStatus { .. } => "order_status", + Reducer::Payment { .. } => "payment", + Reducer::QueueDelivery { .. } => "queue_delivery", Reducer::ResetTpcc => "reset_tpcc", - Reducer::RollbackItemReservation { .. } => "rollback_item_reservation", - Reducer::SetSpacetimedbUri { .. } => "set_spacetimedb_uri", + Reducer::ResolveAndUpdateCustomerForPayment { .. } => "resolve_and_update_customer_for_payment", + Reducer::StockLevel { .. 
} => "stock_level", _ => unreachable!(), } } #[allow(clippy::clone_on_copy)] fn args_bsatn(&self) -> Result, __sats::bsatn::EncodeError> { match self { - Reducer::ConfirmItemReservation { rollback_token } => { - __sats::bsatn::to_vec(&confirm_item_reservation_reducer::ConfirmItemReservationArgs { - rollback_token: rollback_token.clone(), - }) + Reducer::DeliveryProgress { run_id } => { + __sats::bsatn::to_vec(&delivery_progress_reducer::DeliveryProgressArgs { run_id: run_id.clone() }) } + Reducer::FetchDeliveryCompletions { + run_id, + after_completion_id, + limit, + } => __sats::bsatn::to_vec(&fetch_delivery_completions_reducer::FetchDeliveryCompletionsArgs { + run_id: run_id.clone(), + after_completion_id: after_completion_id.clone(), + limit: limit.clone(), + }), Reducer::LoadCustomers { rows } => { __sats::bsatn::to_vec(&load_customers_reducer::LoadCustomersArgs { rows: rows.clone() }) } @@ -198,14 +266,69 @@ impl __sdk::Reducer for Reducer { Reducer::LoadWarehouses { rows } => { __sats::bsatn::to_vec(&load_warehouses_reducer::LoadWarehousesArgs { rows: rows.clone() }) } - Reducer::ResetTpcc => __sats::bsatn::to_vec(&reset_tpcc_reducer::ResetTpccArgs {}), - Reducer::RollbackItemReservation { rollback_token } => { - __sats::bsatn::to_vec(&rollback_item_reservation_reducer::RollbackItemReservationArgs { - rollback_token: rollback_token.clone(), + Reducer::NewOrder { + w_id, + d_id, + c_id, + order_lines, + } => __sats::bsatn::to_vec(&new_order_reducer::NewOrderArgs { + w_id: w_id.clone(), + d_id: d_id.clone(), + c_id: c_id.clone(), + order_lines: order_lines.clone(), + }), + Reducer::OrderItemAndDecrementStock { input } => __sats::bsatn::to_vec( + &order_item_and_decrement_stock_reducer::OrderItemAndDecrementStockArgs { input: input.clone() }, + ), + Reducer::OrderStatus { w_id, d_id, customer } => { + __sats::bsatn::to_vec(&order_status_reducer::OrderStatusArgs { + w_id: w_id.clone(), + d_id: d_id.clone(), + customer: customer.clone(), }) } - 
Reducer::SetSpacetimedbUri { uri } => { - __sats::bsatn::to_vec(&set_spacetimedb_uri_reducer::SetSpacetimedbUriArgs { uri: uri.clone() }) + Reducer::Payment { + w_id, + d_id, + c_w_id, + c_d_id, + customer, + payment_amount_cents, + } => __sats::bsatn::to_vec(&payment_reducer::PaymentArgs { + w_id: w_id.clone(), + d_id: d_id.clone(), + c_w_id: c_w_id.clone(), + c_d_id: c_d_id.clone(), + customer: customer.clone(), + payment_amount_cents: payment_amount_cents.clone(), + }), + Reducer::QueueDelivery { + run_id, + driver_id, + terminal_id, + request_id, + w_id, + carrier_id, + } => __sats::bsatn::to_vec(&queue_delivery_reducer::QueueDeliveryArgs { + run_id: run_id.clone(), + driver_id: driver_id.clone(), + terminal_id: terminal_id.clone(), + request_id: request_id.clone(), + w_id: w_id.clone(), + carrier_id: carrier_id.clone(), + }), + Reducer::ResetTpcc => __sats::bsatn::to_vec(&reset_tpcc_reducer::ResetTpccArgs {}), + Reducer::ResolveAndUpdateCustomerForPayment { request } => __sats::bsatn::to_vec( + &resolve_and_update_customer_for_payment_reducer::ResolveAndUpdateCustomerForPaymentArgs { + request: request.clone(), + }, + ), + Reducer::StockLevel { w_id, d_id, threshold } => { + __sats::bsatn::to_vec(&stock_level_reducer::StockLevelArgs { + w_id: w_id.clone(), + d_id: d_id.clone(), + threshold: threshold.clone(), + }) } _ => unreachable!(), } diff --git a/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs b/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs deleted file mode 100644 index 03bf82d7d47..00000000000 --- a/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs +++ /dev/null @@ -1,69 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::new_order_line_input_type::NewOrderLineInput; -use super::new_order_result_type::NewOrderResult; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct NewOrderArgs { - pub w_id: u16, - pub d_id: u8, - pub c_id: u32, - pub order_lines: Vec, -} - -impl __sdk::InModule for NewOrderArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `new_order`. -/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait new_order { - fn new_order(&self, w_id: u16, d_id: u8, c_id: u32, order_lines: Vec) { - self.new_order_then(w_id, d_id, c_id, order_lines, |_, _| {}); - } - - fn new_order_then( - &self, - w_id: u16, - d_id: u8, - c_id: u32, - order_lines: Vec, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ); -} - -impl new_order for super::RemoteProcedures { - fn new_order_then( - &self, - w_id: u16, - d_id: u8, - c_id: u32, - order_lines: Vec, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result>( - "new_order", - NewOrderArgs { - w_id, - d_id, - c_id, - order_lines, - }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_reducer.rs b/tools/tpcc-runner/src/module_bindings/new_order_reducer.rs new file mode 100644 index 00000000000..af4914ae2d5 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_reducer.rs @@ -0,0 +1,90 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_line_input_type::NewOrderLineInput; +use super::new_order_result_type::NewOrderResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct NewOrderArgs { + pub w_id: u16, + pub d_id: u8, + pub c_id: u32, + pub order_lines: Vec, +} + +impl From for super::Reducer { + fn from(args: NewOrderArgs) -> Self { + Self::NewOrder { + w_id: args.w_id, + d_id: args.d_id, + c_id: args.c_id, + order_lines: args.order_lines, + } + } +} + +impl __sdk::InModule for NewOrderArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `new_order`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait new_order { + /// Request that the remote module invoke the reducer `new_order` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`new_order:new_order_then`] to run a callback after the reducer completes. + fn new_order(&self, w_id: u16, d_id: u8, c_id: u32, order_lines: Vec) -> __sdk::Result<()> { + self.new_order_then(w_id, d_id, c_id, order_lines, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `new_order` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn new_order_then( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl new_order for super::RemoteReducers { + fn new_order_then( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback::<_, NewOrderResult>( + NewOrderArgs { + w_id, + d_id, + c_id, + order_lines, + }, + callback, + ) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/order_item_and_decrement_stock_reducer.rs b/tools/tpcc-runner/src/module_bindings/order_item_and_decrement_stock_reducer.rs new file mode 100644 index 00000000000..6aadae2368c --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_item_and_decrement_stock_reducer.rs @@ -0,0 +1,69 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::order_item_input_type::OrderItemInput; +use super::order_item_output_type::OrderItemOutput; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct OrderItemAndDecrementStockArgs { + pub input: OrderItemInput, +} + +impl From for super::Reducer { + fn from(args: OrderItemAndDecrementStockArgs) -> Self { + Self::OrderItemAndDecrementStock { input: args.input } + } +} + +impl __sdk::InModule for OrderItemAndDecrementStockArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `order_item_and_decrement_stock`. +/// +/// Implemented for [`super::RemoteReducers`]. 
+pub trait order_item_and_decrement_stock { + /// Request that the remote module invoke the reducer `order_item_and_decrement_stock` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`order_item_and_decrement_stock:order_item_and_decrement_stock_then`] to run a callback after the reducer completes. + fn order_item_and_decrement_stock(&self, input: OrderItemInput) -> __sdk::Result<()> { + self.order_item_and_decrement_stock_then(input, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `order_item_and_decrement_stock` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn order_item_and_decrement_stock_then( + &self, + input: OrderItemInput, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl order_item_and_decrement_stock for super::RemoteReducers { + fn order_item_and_decrement_stock_then( + &self, + input: OrderItemInput, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback::<_, OrderItemOutput>(OrderItemAndDecrementStockArgs { input }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/reserve_item_input_type.rs b/tools/tpcc-runner/src/module_bindings/order_item_input_type.rs similarity index 82% rename from tools/tpcc-runner/src/module_bindings/reserve_item_input_type.rs rename to tools/tpcc-runner/src/module_bindings/order_item_input_type.rs index cbc3507fb38..444115ca51d 100644 --- a/tools/tpcc-runner/src/module_bindings/reserve_item_input_type.rs +++ b/tools/tpcc-runner/src/module_bindings/order_item_input_type.rs @@ -8,11 +8,12 @@ use super::new_order_line_input_type::NewOrderLineInput; #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub struct ReserveItemInput { +pub struct OrderItemInput { pub line: NewOrderLineInput, pub district: u8, + pub is_remote_warehouse: bool, } -impl __sdk::InModule for ReserveItemInput { +impl __sdk::InModule for OrderItemInput { type Module = super::RemoteModule; } diff --git a/tools/tpcc-runner/src/module_bindings/reserve_item_output_type.rs b/tools/tpcc-runner/src/module_bindings/order_item_output_type.rs similarity index 81% rename from tools/tpcc-runner/src/module_bindings/reserve_item_output_type.rs rename to tools/tpcc-runner/src/module_bindings/order_item_output_type.rs index 64822795fec..6bb54f772cc 100644 --- a/tools/tpcc-runner/src/module_bindings/reserve_item_output_type.rs +++ 
b/tools/tpcc-runner/src/module_bindings/order_item_output_type.rs @@ -6,13 +6,12 @@ use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; #[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] #[sats(crate = __lib)] -pub struct ReserveItemOutput { +pub struct OrderItemOutput { pub s_dist: String, pub s_data: String, pub updated_quantity: i32, - pub rollback_token: u64, } -impl __sdk::InModule for ReserveItemOutput { +impl __sdk::InModule for OrderItemOutput { type Module = super::RemoteModule; } diff --git a/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs b/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs deleted file mode 100644 index d0e44d9c7f4..00000000000 --- a/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs +++ /dev/null @@ -1,61 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::customer_selector_type::CustomerSelector; -use super::order_status_result_type::OrderStatusResult; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct OrderStatusArgs { - pub w_id: u16, - pub d_id: u8, - pub customer: CustomerSelector, -} - -impl __sdk::InModule for OrderStatusArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `order_status`. -/// -/// Implemented for [`super::RemoteProcedures`]. 
-pub trait order_status { - fn order_status(&self, w_id: u16, d_id: u8, customer: CustomerSelector) { - self.order_status_then(w_id, d_id, customer, |_, _| {}); - } - - fn order_status_then( - &self, - w_id: u16, - d_id: u8, - customer: CustomerSelector, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ); -} - -impl order_status for super::RemoteProcedures { - fn order_status_then( - &self, - w_id: u16, - d_id: u8, - customer: CustomerSelector, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result>( - "order_status", - OrderStatusArgs { w_id, d_id, customer }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/order_status_reducer.rs b/tools/tpcc-runner/src/module_bindings/order_status_reducer.rs new file mode 100644 index 00000000000..3cdf6510085 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_status_reducer.rs @@ -0,0 +1,79 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_selector_type::CustomerSelector; +use super::order_status_result_type::OrderStatusResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct OrderStatusArgs { + pub w_id: u16, + pub d_id: u8, + pub customer: CustomerSelector, +} + +impl From for super::Reducer { + fn from(args: OrderStatusArgs) -> Self { + Self::OrderStatus { + w_id: args.w_id, + d_id: args.d_id, + customer: args.customer, + } + } +} + +impl __sdk::InModule for OrderStatusArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `order_status`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait order_status { + /// Request that the remote module invoke the reducer `order_status` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`order_status:order_status_then`] to run a callback after the reducer completes. + fn order_status(&self, w_id: u16, d_id: u8, customer: CustomerSelector) -> __sdk::Result<()> { + self.order_status_then(w_id, d_id, customer, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `order_status` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn order_status_then( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl order_status for super::RemoteReducers { + fn order_status_then( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback::<_, OrderStatusResult>(OrderStatusArgs { w_id, d_id, customer }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/payment_procedure.rs b/tools/tpcc-runner/src/module_bindings/payment_procedure.rs deleted file mode 100644 index 1557b3e07fe..00000000000 --- a/tools/tpcc-runner/src/module_bindings/payment_procedure.rs +++ /dev/null @@ -1,85 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::customer_selector_type::CustomerSelector; -use super::payment_result_type::PaymentResult; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct PaymentArgs { - pub w_id: u16, - pub d_id: u8, - pub c_w_id: u16, - pub c_d_id: u8, - pub customer: CustomerSelector, - pub payment_amount_cents: i64, -} - -impl __sdk::InModule for PaymentArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `payment`. -/// -/// Implemented for [`super::RemoteProcedures`]. 
-pub trait payment { - fn payment( - &self, - w_id: u16, - d_id: u8, - c_w_id: u16, - c_d_id: u8, - customer: CustomerSelector, - payment_amount_cents: i64, - ) { - self.payment_then(w_id, d_id, c_w_id, c_d_id, customer, payment_amount_cents, |_, _| {}); - } - - fn payment_then( - &self, - w_id: u16, - d_id: u8, - c_w_id: u16, - c_d_id: u8, - customer: CustomerSelector, - payment_amount_cents: i64, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ); -} - -impl payment for super::RemoteProcedures { - fn payment_then( - &self, - w_id: u16, - d_id: u8, - c_w_id: u16, - c_d_id: u8, - customer: CustomerSelector, - payment_amount_cents: i64, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result>( - "payment", - PaymentArgs { - w_id, - d_id, - c_w_id, - c_d_id, - customer, - payment_amount_cents, - }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/payment_reducer.rs b/tools/tpcc-runner/src/module_bindings/payment_reducer.rs new file mode 100644 index 00000000000..9981730d3e5 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/payment_reducer.rs @@ -0,0 +1,108 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_selector_type::CustomerSelector; +use super::payment_result_type::PaymentResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct PaymentArgs { + pub w_id: u16, + pub d_id: u8, + pub c_w_id: u16, + pub c_d_id: u8, + pub customer: CustomerSelector, + pub payment_amount_cents: i64, +} + +impl From for super::Reducer { + fn from(args: PaymentArgs) -> Self { + Self::Payment { + w_id: args.w_id, + d_id: args.d_id, + c_w_id: args.c_w_id, + c_d_id: args.c_d_id, + customer: args.customer, + payment_amount_cents: args.payment_amount_cents, + } + } +} + +impl __sdk::InModule for PaymentArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `payment`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait payment { + /// Request that the remote module invoke the reducer `payment` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`payment:payment_then`] to run a callback after the reducer completes. + fn payment( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + ) -> __sdk::Result<()> { + self.payment_then(w_id, d_id, c_w_id, c_d_id, customer, payment_amount_cents, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `payment` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. 
+ /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn payment_then( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl payment for super::RemoteReducers { + fn payment_then( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback::<_, PaymentResult>( + PaymentArgs { + w_id, + d_id, + c_w_id, + c_d_id, + customer, + payment_amount_cents, + }, + callback, + ) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs b/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs deleted file mode 100644 index f42b2b1883a..00000000000 --- a/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs +++ /dev/null @@ -1,84 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::delivery_queue_ack_type::DeliveryQueueAck; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct QueueDeliveryArgs { - pub run_id: String, - pub driver_id: String, - pub terminal_id: u32, - pub request_id: u64, - pub w_id: u16, - pub carrier_id: u8, -} - -impl __sdk::InModule for QueueDeliveryArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `queue_delivery`. 
-/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait queue_delivery { - fn queue_delivery( - &self, - run_id: String, - driver_id: String, - terminal_id: u32, - request_id: u64, - w_id: u16, - carrier_id: u8, - ) { - self.queue_delivery_then(run_id, driver_id, terminal_id, request_id, w_id, carrier_id, |_, _| {}); - } - - fn queue_delivery_then( - &self, - run_id: String, - driver_id: String, - terminal_id: u32, - request_id: u64, - w_id: u16, - carrier_id: u8, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ); -} - -impl queue_delivery for super::RemoteProcedures { - fn queue_delivery_then( - &self, - run_id: String, - driver_id: String, - terminal_id: u32, - request_id: u64, - w_id: u16, - carrier_id: u8, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result>( - "queue_delivery", - QueueDeliveryArgs { - run_id, - driver_id, - terminal_id, - request_id, - w_id, - carrier_id, - }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/queue_delivery_reducer.rs b/tools/tpcc-runner/src/module_bindings/queue_delivery_reducer.rs new file mode 100644 index 00000000000..ae15a5f4a4d --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/queue_delivery_reducer.rs @@ -0,0 +1,107 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_queue_ack_type::DeliveryQueueAck; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct QueueDeliveryArgs { + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub w_id: u16, + pub carrier_id: u8, +} + +impl From for super::Reducer { + fn from(args: QueueDeliveryArgs) -> Self { + Self::QueueDelivery { + run_id: args.run_id, + driver_id: args.driver_id, + terminal_id: args.terminal_id, + request_id: args.request_id, + w_id: args.w_id, + carrier_id: args.carrier_id, + } + } +} + +impl __sdk::InModule for QueueDeliveryArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `queue_delivery`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait queue_delivery { + /// Request that the remote module invoke the reducer `queue_delivery` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`queue_delivery:queue_delivery_then`] to run a callback after the reducer completes. + fn queue_delivery( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + ) -> __sdk::Result<()> { + self.queue_delivery_then(run_id, driver_id, terminal_id, request_id, w_id, carrier_id, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `queue_delivery` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. 
+ /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn queue_delivery_then( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl queue_delivery for super::RemoteReducers { + fn queue_delivery_then( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback::<_, DeliveryQueueAck>( + QueueDeliveryArgs { + run_id, + driver_id, + terminal_id, + request_id, + w_id, + carrier_id, + }, + callback, + ) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs b/tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs deleted file mode 100644 index 17fd97889c0..00000000000 --- a/tools/tpcc-runner/src/module_bindings/reserve_item_for_remote_order_procedure.rs +++ /dev/null @@ -1,55 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::reserve_item_input_type::ReserveItemInput; -use super::reserve_item_output_type::ReserveItemOutput; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct ReserveItemForRemoteOrderArgs { - pub input: ReserveItemInput, -} - -impl __sdk::InModule for ReserveItemForRemoteOrderArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `reserve_item_for_remote_order`. -/// -/// Implemented for [`super::RemoteProcedures`]. -pub trait reserve_item_for_remote_order { - fn reserve_item_for_remote_order(&self, input: ReserveItemInput) { - self.reserve_item_for_remote_order_then(input, |_, _| {}); - } - - fn reserve_item_for_remote_order_then( - &self, - input: ReserveItemInput, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ); -} - -impl reserve_item_for_remote_order for super::RemoteProcedures { - fn reserve_item_for_remote_order_then( - &self, - input: ReserveItemInput, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result>( - "reserve_item_for_remote_order", - ReserveItemForRemoteOrderArgs { input }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs b/tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs deleted file mode 100644 index ccca101e097..00000000000 --- a/tools/tpcc-runner/src/module_bindings/reserved_item_log_type.rs +++ /dev/null @@ -1,54 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
- -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::new_order_line_input_type::NewOrderLineInput; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -pub struct ReservedItemLog { - pub rollback_token: u64, - pub line: NewOrderLineInput, -} - -impl __sdk::InModule for ReservedItemLog { - type Module = super::RemoteModule; -} - -/// Column accessor struct for the table `ReservedItemLog`. -/// -/// Provides typed access to columns for query building. -pub struct ReservedItemLogCols { - pub rollback_token: __sdk::__query_builder::Col, - pub line: __sdk::__query_builder::Col, -} - -impl __sdk::__query_builder::HasCols for ReservedItemLog { - type Cols = ReservedItemLogCols; - fn cols(table_name: &'static str) -> Self::Cols { - ReservedItemLogCols { - rollback_token: __sdk::__query_builder::Col::new(table_name, "rollback_token"), - line: __sdk::__query_builder::Col::new(table_name, "line"), - } - } -} - -/// Indexed column accessor struct for the table `ReservedItemLog`. -/// -/// Provides typed access to indexed columns for query building. 
-pub struct ReservedItemLogIxCols { - pub rollback_token: __sdk::__query_builder::IxCol, -} - -impl __sdk::__query_builder::HasIxCols for ReservedItemLog { - type IxCols = ReservedItemLogIxCols; - fn ix_cols(table_name: &'static str) -> Self::IxCols { - ReservedItemLogIxCols { - rollback_token: __sdk::__query_builder::IxCol::new(table_name, "rollback_token"), - } - } -} - -impl __sdk::__query_builder::CanBeLookupTable for ReservedItemLog {} diff --git a/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs b/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs index da9424c94c4..309aad5925c 100644 --- a/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs +++ b/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs @@ -56,6 +56,7 @@ impl reset_tpcc for super::RemoteReducers { + Send + 'static, ) -> __sdk::Result<()> { - self.imp.invoke_reducer_with_callback(ResetTpccArgs {}, callback) + self.imp + .invoke_reducer_with_callback::<_, ()>(ResetTpccArgs {}, callback) } } diff --git a/tools/tpcc-runner/src/module_bindings/resolve_and_update_customer_for_payment_reducer.rs b/tools/tpcc-runner/src/module_bindings/resolve_and_update_customer_for_payment_reducer.rs new file mode 100644 index 00000000000..527c71eb637 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/resolve_and_update_customer_for_payment_reducer.rs @@ -0,0 +1,69 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_type::Customer; +use super::payment_request_type::PaymentRequest; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct ResolveAndUpdateCustomerForPaymentArgs { + pub request: PaymentRequest, +} + +impl From for super::Reducer { + fn from(args: ResolveAndUpdateCustomerForPaymentArgs) -> Self { + Self::ResolveAndUpdateCustomerForPayment { request: args.request } + } +} + +impl __sdk::InModule for ResolveAndUpdateCustomerForPaymentArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `resolve_and_update_customer_for_payment`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait resolve_and_update_customer_for_payment { + /// Request that the remote module invoke the reducer `resolve_and_update_customer_for_payment` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// Use [`resolve_and_update_customer_for_payment::resolve_and_update_customer_for_payment_then`] to run a callback after the reducer completes. + fn resolve_and_update_customer_for_payment(&self, request: PaymentRequest) -> __sdk::Result<()> { + self.resolve_and_update_customer_for_payment_then(request, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `resolve_and_update_customer_for_payment` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request.
+ /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn resolve_and_update_customer_for_payment_then( + &self, + request: PaymentRequest, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl resolve_and_update_customer_for_payment for super::RemoteReducers { + fn resolve_and_update_customer_for_payment_then( + &self, + request: PaymentRequest, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback::<_, Customer>(ResolveAndUpdateCustomerForPaymentArgs { request }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs b/tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs deleted file mode 100644 index 83166d957fd..00000000000 --- a/tools/tpcc-runner/src/module_bindings/rollback_item_reservation_reducer.rs +++ /dev/null @@ -1,68 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -pub(super) struct RollbackItemReservationArgs { - pub rollback_token: u64, -} - -impl From for super::Reducer { - fn from(args: RollbackItemReservationArgs) -> Self { - Self::RollbackItemReservation { - rollback_token: args.rollback_token, - } - } -} - -impl __sdk::InModule for RollbackItemReservationArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the reducer `rollback_item_reservation`. -/// -/// Implemented for [`super::RemoteReducers`]. 
-pub trait rollback_item_reservation { - /// Request that the remote module invoke the reducer `rollback_item_reservation` to run as soon as possible. - /// - /// This method returns immediately, and errors only if we are unable to send the request. - /// The reducer will run asynchronously in the future, - /// and this method provides no way to listen for its completion status. - /// /// Use [`rollback_item_reservation:rollback_item_reservation_then`] to run a callback after the reducer completes. - fn rollback_item_reservation(&self, rollback_token: u64) -> __sdk::Result<()> { - self.rollback_item_reservation_then(rollback_token, |_, _| {}) - } - - /// Request that the remote module invoke the reducer `rollback_item_reservation` to run as soon as possible, - /// registering `callback` to run when we are notified that the reducer completed. - /// - /// This method returns immediately, and errors only if we are unable to send the request. - /// The reducer will run asynchronously in the future, - /// and its status can be observed with the `callback`. 
- fn rollback_item_reservation_then( - &self, - rollback_token: u64, - - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) -> __sdk::Result<()>; -} - -impl rollback_item_reservation for super::RemoteReducers { - fn rollback_item_reservation_then( - &self, - rollback_token: u64, - - callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) -> __sdk::Result<()> { - self.imp - .invoke_reducer_with_callback(RollbackItemReservationArgs { rollback_token }, callback) - } -} diff --git a/tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs b/tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs deleted file mode 100644 index 036aef1b0d3..00000000000 --- a/tools/tpcc-runner/src/module_bindings/spacetime_db_uri_type.rs +++ /dev/null @@ -1,45 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -pub struct SpacetimeDbUri { - pub uri: String, -} - -impl __sdk::InModule for SpacetimeDbUri { - type Module = super::RemoteModule; -} - -/// Column accessor struct for the table `SpacetimeDbUri`. -/// -/// Provides typed access to columns for query building. -pub struct SpacetimeDbUriCols { - pub uri: __sdk::__query_builder::Col, -} - -impl __sdk::__query_builder::HasCols for SpacetimeDbUri { - type Cols = SpacetimeDbUriCols; - fn cols(table_name: &'static str) -> Self::Cols { - SpacetimeDbUriCols { - uri: __sdk::__query_builder::Col::new(table_name, "uri"), - } - } -} - -/// Indexed column accessor struct for the table `SpacetimeDbUri`. -/// -/// Provides typed access to indexed columns for query building. 
-pub struct SpacetimeDbUriIxCols {} - -impl __sdk::__query_builder::HasIxCols for SpacetimeDbUri { - type IxCols = SpacetimeDbUriIxCols; - fn ix_cols(table_name: &'static str) -> Self::IxCols { - SpacetimeDbUriIxCols {} - } -} - -impl __sdk::__query_builder::CanBeLookupTable for SpacetimeDbUri {} diff --git a/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs b/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs deleted file mode 100644 index cecefcbbf0e..00000000000 --- a/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs +++ /dev/null @@ -1,60 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE -// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. - -#![allow(unused, clippy::all)] -use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; - -use super::stock_level_result_type::StockLevelResult; - -#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] -#[sats(crate = __lib)] -struct StockLevelArgs { - pub w_id: u16, - pub d_id: u8, - pub threshold: i32, -} - -impl __sdk::InModule for StockLevelArgs { - type Module = super::RemoteModule; -} - -#[allow(non_camel_case_types)] -/// Extension trait for access to the procedure `stock_level`. -/// -/// Implemented for [`super::RemoteProcedures`]. 
-pub trait stock_level { - fn stock_level(&self, w_id: u16, d_id: u8, threshold: i32) { - self.stock_level_then(w_id, d_id, threshold, |_, _| {}); - } - - fn stock_level_then( - &self, - w_id: u16, - d_id: u8, - threshold: i32, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ); -} - -impl stock_level for super::RemoteProcedures { - fn stock_level_then( - &self, - w_id: u16, - d_id: u8, - threshold: i32, - - __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) - + Send - + 'static, - ) { - self.imp - .invoke_procedure_with_callback::<_, Result>( - "stock_level", - StockLevelArgs { w_id, d_id, threshold }, - __callback, - ); - } -} diff --git a/tools/tpcc-runner/src/module_bindings/stock_level_reducer.rs b/tools/tpcc-runner/src/module_bindings/stock_level_reducer.rs new file mode 100644 index 00000000000..0825a2caac3 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/stock_level_reducer.rs @@ -0,0 +1,78 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::stock_level_result_type::StockLevelResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct StockLevelArgs { + pub w_id: u16, + pub d_id: u8, + pub threshold: i32, +} + +impl From for super::Reducer { + fn from(args: StockLevelArgs) -> Self { + Self::StockLevel { + w_id: args.w_id, + d_id: args.d_id, + threshold: args.threshold, + } + } +} + +impl __sdk::InModule for StockLevelArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `stock_level`. +/// +/// Implemented for [`super::RemoteReducers`]. 
+pub trait stock_level { + /// Request that the remote module invoke the reducer `stock_level` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// Use [`stock_level::stock_level_then`] to run a callback after the reducer completes. + fn stock_level(&self, w_id: u16, d_id: u8, threshold: i32) -> __sdk::Result<()> { + self.stock_level_then(w_id, d_id, threshold, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `stock_level` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn stock_level_then( + &self, + w_id: u16, + d_id: u8, + threshold: i32, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl stock_level for super::RemoteReducers { + fn stock_level_then( + &self, + w_id: u16, + d_id: u8, + threshold: i32, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback::<_, StockLevelResult>(StockLevelArgs { w_id, d_id, threshold }, callback) + } +} From 1dd621ddcddc347385d78b04ed5e988fccbcfdf8 Mon Sep 17 00:00:00 2001 From: Phoebe Goldman Date: Sun, 29 Mar 2026 13:49:32 -0400 Subject: [PATCH 38/38] Add timing, rename some spans --- modules/tpcc/src/load.rs | 10 ++--- tools/tpcc-runner/src/loader.rs | 67 ++++++++++++++++++++++----------- 2 files changed, 51 insertions(+), 26 deletions(-) diff --git a/modules/tpcc/src/load.rs b/modules/tpcc/src/load.rs
index 753bf5ece40..923883f857f 100644 --- a/modules/tpcc/src/load.rs +++ b/modules/tpcc/src/load.rs @@ -372,7 +372,7 @@ fn run_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) - } fn load_item_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { - let _timer = LogStopwatch::new("load_items"); + let _timer = LogStopwatch::new("load_item_chunk"); if job.next_item_id == 0 || job.next_item_id > ITEMS { return Err(format!("invalid item cursor {}", job.next_item_id)); } @@ -402,7 +402,7 @@ fn load_warehouse_district_chunk( config: &TpccLoadConfig, job: &TpccLoadJob, ) -> Result { - let _timer = LogStopwatch::new("load_warehouses_districts"); + let _timer = LogStopwatch::new("load_warehouses_district_chunk"); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < warehouse_start(config.database_number, config.warehouses_per_database) || job.next_warehouse_id > end_warehouse @@ -439,7 +439,7 @@ fn load_warehouse_district_chunk( } fn load_stock_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { - let _timer = LogStopwatch::new("load_stocks"); + let _timer = LogStopwatch::new("load_stock_chunk"); let start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { @@ -480,7 +480,7 @@ fn load_customer_history_chunk( config: &TpccLoadConfig, job: &TpccLoadJob, ) -> Result { - let _timer = LogStopwatch::new("load_customers_history"); + let _timer = LogStopwatch::new("load_customer_history_chunk"); let start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < start_warehouse || 
job.next_warehouse_id > end_warehouse { @@ -530,7 +530,7 @@ fn load_customer_history_chunk( } fn load_order_chunk(ctx: &ReducerContext, config: &TpccLoadConfig, job: &TpccLoadJob) -> Result { - let _timer = LogStopwatch::new("load_orders"); + let _timer = LogStopwatch::new("load_order_chunk"); let start_warehouse = warehouse_start(config.database_number, config.warehouses_per_database); let end_warehouse = warehouse_end(config.database_number, config.warehouses_per_database); if job.next_warehouse_id < start_warehouse || job.next_warehouse_id > end_warehouse { diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs index c2f6994df7f..85643ba2032 100644 --- a/tools/tpcc-runner/src/loader.rs +++ b/tools/tpcc-runner/src/loader.rs @@ -60,33 +60,58 @@ fn database_number_chunks(num_databases: u16, parallelism: usize) -> Vec {{ + let before = std::time::Instant::now(); + log::info!("Span {} starting at {:?}", $span_name, before); + let res = (|| Ok::<_, anyhow::Error>({ $($body)* }))(); + let elapsed = before.elapsed(); + log::info!("Span {} ended after {:?}", $span_name, elapsed); + res? 
+ }} +} + fn run_one_database(config: &LoadConfig, database_number: u16, topology: &DatabaseTopology) -> Result<()> { - let database_identity = topology.identity_for_database_number(database_number)?; - log::info!( - "starting tpcc load into {} / {} with {} warehouse(s)", - config.connection.uri, - database_identity, - config.warehouses_per_database - ); + time!("run_one_database" { + let database_identity = topology.identity_for_database_number(database_number)?; + log::info!( + "starting tpcc load into {} / {} with {} warehouse(s)", + config.connection.uri, + database_identity, + config.warehouses_per_database + ); + + let mut client = ModuleClient::connect(&config.connection, database_identity)?; + client.subscribe_load_state()?; + + time!("reset" { + if config.reset { + client.reset_tpcc().context("failed to reset tpcc data")?; + } + }); - let mut client = ModuleClient::connect(&config.connection, database_identity)?; - client.subscribe_load_state()?; + let request = time!("build_load_request" { + build_load_request(config, database_number, topology)? + }); + time!("configure_tpcc_load" {client + .configure_tpcc_load(request) + .context("failed to configure tpcc load")})?; - if config.reset { - client.reset_tpcc().context("failed to reset tpcc data")?; - } + time!("start_tpcc_load" { + client.start_tpcc_load().context("failed to start tpcc load")? + }); - let request = build_load_request(config, database_number, topology)?; - client - .configure_tpcc_load(request) - .context("failed to configure tpcc load")?; - client.start_tpcc_load().context("failed to start tpcc load")?; + time!("wait_for_load_completion" { + wait_for_load_completion(&client, database_identity)? 
+ }); - wait_for_load_completion(&client, database_identity)?; - client.shutdown(); + time!("shutdown" { + client.shutdown() + }); - log::info!("tpcc load for database {database_identity} finished"); - Ok(()) + log::info!("tpcc load for database {database_identity} finished"); + Ok(()) + }) } fn build_load_request(