diff --git a/Cargo.lock b/Cargo.lock index 03618187864..0fa10cea95d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9787,6 +9787,34 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +[[package]] +name = "tpcc-module" +version = "0.1.0" +dependencies = [ + "anyhow", + "log", + "spacetimedb 2.1.0", +] + +[[package]] +name = "tpcc-runner" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum", + "clap 4.5.50", + "env_logger 0.10.2", + "log", + "parking_lot 0.12.5", + "rand 0.9.2", + "reqwest 0.12.24", + "serde", + "serde_json", + "spacetimedb-sdk", + "tokio", + "toml 0.8.23", +] + [[package]] name = "tracing" version = "0.1.41" diff --git a/Cargo.toml b/Cargo.toml index 49c1fa355da..7228dba44c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ members = [ "modules/sdk-test-view", "modules/sdk-test-view-pk", "modules/sdk-test-event-table", + "modules/tpcc", "sdks/rust/tests/test-client", "sdks/rust/tests/test-counter", "sdks/rust/tests/connect_disconnect_client", @@ -62,6 +63,7 @@ members = [ "tools/upgrade-version", "tools/license-check", "tools/replace-spacetimedb", + "tools/tpcc-runner", "tools/generate-client-api", "tools/gen-bindings", "tools/xtask-llm-benchmark", diff --git a/modules/tpcc/Cargo.toml b/modules/tpcc/Cargo.toml new file mode 100644 index 00000000000..e21da7f3a8a --- /dev/null +++ b/modules/tpcc/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "tpcc-module" +version = "0.1.0" +edition.workspace = true + +[lib] +crate-type = ["cdylib"] + +[dependencies] +anyhow.workspace = true +log.workspace = true +spacetimedb = { workspace = true, features = ["unstable"] } + +[lints] +workspace = true diff --git a/modules/tpcc/src/lib.rs b/modules/tpcc/src/lib.rs new file mode 100644 index 00000000000..fe84a00d343 --- /dev/null +++ b/modules/tpcc/src/lib.rs @@ -0,0 +1,1434 @@ +use spacetimedb::{ + procedure, reducer, table, ProcedureContext, 
ReducerContext, ScheduleAt, SpacetimeType, Table, Timestamp, +}; +use std::collections::BTreeSet; + +const DISTRICTS_PER_WAREHOUSE: u8 = 10; +const CUSTOMERS_PER_DISTRICT: u32 = 3_000; +const ITEMS: u32 = 100_000; +const MAX_C_DATA_LEN: usize = 500; +const TAX_SCALE: i64 = 10_000; + +macro_rules! ensure { + ($cond:expr, $($arg:tt)+) => { + if !($cond) { + return Err(format!($($arg)+)); + } + }; +} + +#[derive(Clone, Debug, SpacetimeType)] +pub enum CustomerSelector { + ById(u32), + ByLastName(String), +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderLineInput { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderLineResult { + pub item_id: u32, + pub item_name: String, + pub supply_w_id: u16, + pub quantity: u32, + pub stock_quantity: i32, + pub item_price_cents: i64, + pub amount_cents: i64, + pub brand_generic: String, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct NewOrderResult { + pub warehouse_tax_bps: i32, + pub district_tax_bps: i32, + pub customer_discount_bps: i32, + pub customer_last: String, + pub customer_credit: String, + pub order_id: u32, + pub entry_d: Timestamp, + pub total_amount_cents: i64, + pub all_local: bool, + pub lines: Vec, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct PaymentResult { + pub warehouse_name: String, + pub district_name: String, + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub customer_credit: String, + pub customer_discount_bps: i32, + pub payment_amount_cents: i64, + pub customer_data: Option, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct OrderStatusLineResult { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, + pub amount_cents: i64, + pub delivery_d: Option, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct OrderStatusResult { + pub customer_id: u32, + pub 
customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub order_id: Option, + pub order_entry_d: Option, + pub carrier_id: Option, + pub lines: Vec, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct StockLevelResult { + pub warehouse_id: u16, + pub district_id: u8, + pub threshold: i32, + pub low_stock_count: u32, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct DeliveryQueueAck { + pub scheduled_id: u64, + pub queued_at: Timestamp, + pub warehouse_id: u16, + pub carrier_id: u8, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct DeliveryProgress { + pub run_id: String, + pub pending_jobs: u64, + pub completed_jobs: u64, +} + +#[derive(Clone, Debug, SpacetimeType)] +pub struct DeliveryCompletionView { + pub completion_id: u64, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub warehouse_id: u16, + pub carrier_id: u8, + pub queued_at: Timestamp, + pub completed_at: Timestamp, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +#[table(accessor = warehouse)] +#[derive(Clone, Debug)] +pub struct Warehouse { + #[primary_key] + pub w_id: u16, + pub w_name: String, + pub w_street_1: String, + pub w_street_2: String, + pub w_city: String, + pub w_state: String, + pub w_zip: String, + pub w_tax_bps: i32, + pub w_ytd_cents: i64, +} + +#[table( + accessor = district, + index(accessor = by_w_d, btree(columns = [d_w_id, d_id])) +)] +#[derive(Clone, Debug)] +pub struct District { + pub d_w_id: u16, + pub d_id: u8, + pub d_name: String, + pub d_street_1: String, + pub d_street_2: String, + pub d_city: String, + pub d_state: String, + pub d_zip: String, + pub d_tax_bps: i32, + pub d_ytd_cents: i64, + pub d_next_o_id: u32, +} + +#[table( + accessor = customer, + index(accessor = by_w_d_c_id, btree(columns = [c_w_id, c_d_id, c_id])), + index(accessor = by_w_d_last_first_id, btree(columns = [c_w_id, c_d_id, c_last, c_first, c_id])) +)] 
+#[derive(Clone, Debug)] +pub struct Customer { + pub c_w_id: u16, + pub c_d_id: u8, + pub c_id: u32, + pub c_first: String, + pub c_middle: String, + pub c_last: String, + pub c_street_1: String, + pub c_street_2: String, + pub c_city: String, + pub c_state: String, + pub c_zip: String, + pub c_phone: String, + pub c_since: Timestamp, + pub c_credit: String, + pub c_credit_lim_cents: i64, + pub c_discount_bps: i32, + pub c_balance_cents: i64, + pub c_ytd_payment_cents: i64, + pub c_payment_cnt: u32, + pub c_delivery_cnt: u32, + pub c_data: String, +} + +#[table(accessor = history)] +#[derive(Clone, Debug)] +pub struct History { + #[primary_key] + #[auto_inc] + pub history_id: u64, + pub h_c_id: u32, + pub h_c_d_id: u8, + pub h_c_w_id: u16, + pub h_d_id: u8, + pub h_w_id: u16, + pub h_date: Timestamp, + pub h_amount_cents: i64, + pub h_data: String, +} + +#[table(accessor = item)] +#[derive(Clone, Debug)] +pub struct Item { + #[primary_key] + pub i_id: u32, + pub i_im_id: u32, + pub i_name: String, + pub i_price_cents: i64, + pub i_data: String, +} + +#[table( + accessor = stock, + index(accessor = by_w_i, btree(columns = [s_w_id, s_i_id])) +)] +#[derive(Clone, Debug)] +pub struct Stock { + pub s_w_id: u16, + pub s_i_id: u32, + pub s_quantity: i32, + pub s_dist_01: String, + pub s_dist_02: String, + pub s_dist_03: String, + pub s_dist_04: String, + pub s_dist_05: String, + pub s_dist_06: String, + pub s_dist_07: String, + pub s_dist_08: String, + pub s_dist_09: String, + pub s_dist_10: String, + pub s_ytd: u64, + pub s_order_cnt: u32, + pub s_remote_cnt: u32, + pub s_data: String, +} + +#[table( + accessor = oorder, + index(accessor = by_w_d_o_id, btree(columns = [o_w_id, o_d_id, o_id])), + index(accessor = by_w_d_c_o_id, btree(columns = [o_w_id, o_d_id, o_c_id, o_id])) +)] +#[derive(Clone, Debug)] +pub struct OOrder { + pub o_w_id: u16, + pub o_d_id: u8, + pub o_id: u32, + pub o_c_id: u32, + pub o_entry_d: Timestamp, + pub o_carrier_id: Option, + pub o_ol_cnt: u8, 
+ pub o_all_local: bool, +} + +#[table( + accessor = new_order_row, + index(accessor = by_w_d_o_id, btree(columns = [no_w_id, no_d_id, no_o_id])) +)] +#[derive(Clone, Debug)] +pub struct NewOrder { + pub no_w_id: u16, + pub no_d_id: u8, + pub no_o_id: u32, +} + +#[table( + accessor = order_line, + index(accessor = by_w_d_o_number, btree(columns = [ol_w_id, ol_d_id, ol_o_id, ol_number])) +)] +#[derive(Clone, Debug)] +pub struct OrderLine { + pub ol_w_id: u16, + pub ol_d_id: u8, + pub ol_o_id: u32, + pub ol_number: u8, + pub ol_i_id: u32, + pub ol_supply_w_id: u16, + pub ol_delivery_d: Option, + pub ol_quantity: u32, + pub ol_amount_cents: i64, + pub ol_dist_info: String, +} + +#[table( + accessor = delivery_job, + scheduled(run_delivery_job), + index(accessor = by_run_id, btree(columns = [run_id])) +)] +#[derive(Clone, Debug)] +pub struct DeliveryJob { + #[primary_key] + #[auto_inc] + pub scheduled_id: u64, + pub scheduled_at: ScheduleAt, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub queued_at: Timestamp, + pub w_id: u16, + pub carrier_id: u8, + pub next_d_id: u8, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +#[table( + accessor = delivery_completion, + index(accessor = by_run_completion, btree(columns = [run_id, completion_id])) +)] +#[derive(Clone, Debug)] +pub struct DeliveryCompletion { + #[primary_key] + #[auto_inc] + pub completion_id: u64, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub warehouse_id: u16, + pub carrier_id: u8, + pub queued_at: Timestamp, + pub completed_at: Timestamp, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +struct PaymentRequest<'a> { + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer_selector: &'a CustomerSelector, + payment_amount_cents: i64, + now: Timestamp, +} + +#[reducer] +pub fn reset_tpcc(ctx: &ReducerContext) -> Result<(), String> { + for row in 
ctx.db.delivery_job().iter() { + ctx.db.delivery_job().delete(row); + } + for row in ctx.db.delivery_completion().iter() { + ctx.db.delivery_completion().delete(row); + } + for row in ctx.db.order_line().iter() { + ctx.db.order_line().delete(row); + } + for row in ctx.db.new_order_row().iter() { + ctx.db.new_order_row().delete(row); + } + for row in ctx.db.oorder().iter() { + ctx.db.oorder().delete(row); + } + for row in ctx.db.history().iter() { + ctx.db.history().delete(row); + } + for row in ctx.db.customer().iter() { + ctx.db.customer().delete(row); + } + for row in ctx.db.district().iter() { + ctx.db.district().delete(row); + } + for row in ctx.db.stock().iter() { + ctx.db.stock().delete(row); + } + for row in ctx.db.item().iter() { + ctx.db.item().delete(row); + } + for row in ctx.db.warehouse().iter() { + ctx.db.warehouse().delete(row); + } + Ok(()) +} + +#[reducer] +pub fn load_warehouses(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + validate_warehouse_row(&row)?; + ctx.db.warehouse().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_districts(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + insert_district_checked(ctx, row)?; + } + Ok(()) +} + +#[reducer] +pub fn load_customers(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + insert_customer_checked(ctx, row)?; + } + Ok(()) +} + +#[reducer] +pub fn load_history(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for mut row in rows { + row.history_id = 0; + ctx.db.history().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_items(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + validate_item_row(&row)?; + ctx.db.item().insert(row); + } + Ok(()) +} + +#[reducer] +pub fn load_stocks(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + insert_stock_checked(ctx, row)?; + } + Ok(()) +} + +#[reducer] +pub fn load_orders(ctx: &ReducerContext, rows: 
Vec) -> Result<(), String> { + for row in rows { + insert_order_checked_reducer(ctx, row)?; + } + Ok(()) +} + +#[reducer] +pub fn load_new_orders(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + insert_new_order_checked_reducer(ctx, row)?; + } + Ok(()) +} + +#[reducer] +pub fn load_order_lines(ctx: &ReducerContext, rows: Vec) -> Result<(), String> { + for row in rows { + insert_order_line_checked_reducer(ctx, row)?; + } + Ok(()) +} + +#[procedure] +pub fn new_order( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, +) -> Result { + ctx.try_with_tx(|tx| new_order_tx(tx, w_id, d_id, c_id, order_lines.clone())) +} + +#[procedure] +pub fn payment( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, +) -> Result { + let now = ctx.timestamp; + ctx.try_with_tx(|tx| { + payment_tx( + tx, + PaymentRequest { + w_id, + d_id, + c_w_id, + c_d_id, + customer_selector: &customer, + payment_amount_cents, + now, + }, + ) + }) +} + +#[procedure] +pub fn order_status( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + customer: CustomerSelector, +) -> Result { + ctx.try_with_tx(|tx| order_status_tx(tx, w_id, d_id, &customer)) +} + +#[procedure] +pub fn stock_level( + ctx: &mut ProcedureContext, + w_id: u16, + d_id: u8, + threshold: i32, +) -> Result { + ctx.try_with_tx(|tx| stock_level_tx(tx, w_id, d_id, threshold)) +} + +#[procedure] +pub fn queue_delivery( + ctx: &mut ProcedureContext, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, +) -> Result { + let queued_at = ctx.timestamp; + ctx.try_with_tx(|tx| { + ensure_warehouse_exists(tx, w_id)?; + ensure!((1..=10).contains(&carrier_id), "carrier_id must be in the range 1..=10"); + + let job = tx.db.delivery_job().insert(DeliveryJob { + scheduled_id: 0, + scheduled_at: queued_at.into(), + run_id: run_id.clone(), + 
driver_id: driver_id.clone(), + terminal_id, + request_id, + queued_at, + w_id, + carrier_id, + next_d_id: 1, + skipped_districts: 0, + processed_districts: 0, + }); + + Ok(DeliveryQueueAck { + scheduled_id: job.scheduled_id, + queued_at, + warehouse_id: w_id, + carrier_id, + }) + }) +} + +#[procedure] +pub fn delivery_progress(ctx: &mut ProcedureContext, run_id: String) -> Result { + ctx.try_with_tx(|tx| { + let pending_jobs = tx.db.delivery_job().by_run_id().filter(&run_id).count() as u64; + let completed_jobs = tx + .db + .delivery_completion() + .by_run_completion() + .filter((&run_id, 0u64..)) + .count() as u64; + Ok(DeliveryProgress { + run_id: run_id.clone(), + pending_jobs, + completed_jobs, + }) + }) +} + +#[procedure] +pub fn fetch_delivery_completions( + ctx: &mut ProcedureContext, + run_id: String, + after_completion_id: u64, + limit: u32, +) -> Result, String> { + ctx.try_with_tx(|tx| { + let limit = limit as usize; + let rows = tx + .db + .delivery_completion() + .by_run_completion() + .filter((&run_id, after_completion_id.saturating_add(1)..)) + .take(limit) + .map(as_delivery_completion_view) + .collect(); + Ok(rows) + }) +} + +#[reducer] +pub fn run_delivery_job(ctx: &ReducerContext, job: DeliveryJob) -> Result<(), String> { + let mut next_job = job.clone(); + + let had_order = process_delivery_district(ctx, job.w_id, job.next_d_id, job.carrier_id, ctx.timestamp)?; + next_job.processed_districts = next_job.processed_districts.saturating_add(1); + if !had_order { + next_job.skipped_districts = next_job.skipped_districts.saturating_add(1); + } + + let jobs = ctx.db.delivery_job(); + jobs.scheduled_id().delete(job.scheduled_id); + + if job.next_d_id >= DISTRICTS_PER_WAREHOUSE { + ctx.db.delivery_completion().insert(DeliveryCompletion { + completion_id: 0, + run_id: job.run_id, + driver_id: job.driver_id, + terminal_id: job.terminal_id, + request_id: job.request_id, + warehouse_id: job.w_id, + carrier_id: job.carrier_id, + queued_at: job.queued_at, + 
completed_at: ctx.timestamp, + skipped_districts: next_job.skipped_districts, + processed_districts: next_job.processed_districts, + }); + } else { + next_job.next_d_id += 1; + next_job.scheduled_at = ctx.timestamp.into(); + ctx.db.delivery_job().insert(next_job); + } + + Ok(()) +} + +fn validate_warehouse_row(row: &Warehouse) -> Result<(), String> { + ensure!( + (1..=i32::from(u16::MAX)).contains(&(row.w_id as i32)), + "warehouse id must be positive" + ); + Ok(()) +} + +fn validate_district_row(row: &District) -> Result<(), String> { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&row.d_id), + "district id out of range" + ); + Ok(()) +} + +fn validate_customer_row(row: &Customer) -> Result<(), String> { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&row.c_d_id), + "customer district id out of range" + ); + ensure!( + (1..=CUSTOMERS_PER_DISTRICT).contains(&row.c_id), + "customer id out of range" + ); + Ok(()) +} + +fn validate_item_row(row: &Item) -> Result<(), String> { + ensure!((1..=ITEMS).contains(&row.i_id), "item id out of range"); + Ok(()) +} + +fn validate_stock_row(row: &Stock) -> Result<(), String> { + ensure!((1..=ITEMS).contains(&row.s_i_id), "stock item id out of range"); + Ok(()) +} + +fn validate_order_row(row: &OOrder) -> Result<(), String> { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&row.o_d_id), + "order district id out of range" + ); + ensure!((5..=15).contains(&row.o_ol_cnt), "order line count out of range"); + Ok(()) +} + +fn validate_new_order_row(row: &NewOrder) -> Result<(), String> { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&row.no_d_id), + "new-order district id out of range" + ); + Ok(()) +} + +fn validate_order_line_row(row: &OrderLine) -> Result<(), String> { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&row.ol_d_id), + "order-line district id out of range" + ); + ensure!((1..=15).contains(&row.ol_number), "order-line number out of range"); + ensure!(row.ol_quantity > 0, "order-line quantity must 
be positive"); + Ok(()) +} + +fn insert_district_checked(ctx: &ReducerContext, row: District) -> Result<(), String> { + validate_district_row(&row)?; + ensure!( + ctx.db + .district() + .by_w_d() + .filter((row.d_w_id, row.d_id)) + .next() + .is_none(), + "district ({}, {}) already exists", + row.d_w_id, + row.d_id + ); + ctx.db.district().insert(row); + Ok(()) +} + +fn insert_customer_checked(ctx: &ReducerContext, row: Customer) -> Result<(), String> { + validate_customer_row(&row)?; + ensure!( + ctx.db + .customer() + .by_w_d_c_id() + .filter((row.c_w_id, row.c_d_id, row.c_id)) + .next() + .is_none(), + "customer ({}, {}, {}) already exists", + row.c_w_id, + row.c_d_id, + row.c_id + ); + ctx.db.customer().insert(row); + Ok(()) +} + +fn insert_stock_checked(ctx: &ReducerContext, row: Stock) -> Result<(), String> { + validate_stock_row(&row)?; + ensure!( + ctx.db + .stock() + .by_w_i() + .filter((row.s_w_id, row.s_i_id)) + .next() + .is_none(), + "stock ({}, {}) already exists", + row.s_w_id, + row.s_i_id + ); + ctx.db.stock().insert(row); + Ok(()) +} + +fn insert_order_checked_reducer(ctx: &ReducerContext, row: OOrder) -> Result<(), String> { + validate_order_row(&row)?; + ensure!( + ctx.db + .oorder() + .by_w_d_o_id() + .filter((row.o_w_id, row.o_d_id, row.o_id)) + .next() + .is_none(), + "order ({}, {}, {}) already exists", + row.o_w_id, + row.o_d_id, + row.o_id + ); + ctx.db.oorder().insert(row); + Ok(()) +} + +fn insert_new_order_checked_reducer(ctx: &ReducerContext, row: NewOrder) -> Result<(), String> { + validate_new_order_row(&row)?; + ensure!( + ctx.db + .new_order_row() + .by_w_d_o_id() + .filter((row.no_w_id, row.no_d_id, row.no_o_id)) + .next() + .is_none(), + "new-order ({}, {}, {}) already exists", + row.no_w_id, + row.no_d_id, + row.no_o_id + ); + ctx.db.new_order_row().insert(row); + Ok(()) +} + +fn insert_order_line_checked_reducer(ctx: &ReducerContext, row: OrderLine) -> Result<(), String> { + validate_order_line_row(&row)?; + ensure!( + ctx.db + 
.order_line() + .by_w_d_o_number() + .filter((row.ol_w_id, row.ol_d_id, row.ol_o_id, row.ol_number)) + .next() + .is_none(), + "order-line ({}, {}, {}, {}) already exists", + row.ol_w_id, + row.ol_d_id, + row.ol_o_id, + row.ol_number + ); + ctx.db.order_line().insert(row); + Ok(()) +} + +fn insert_order_checked_tx(tx: &spacetimedb::TxContext, row: OOrder) -> Result<(), String> { + validate_order_row(&row)?; + ensure!( + tx.db + .oorder() + .by_w_d_o_id() + .filter((row.o_w_id, row.o_d_id, row.o_id)) + .next() + .is_none(), + "order ({}, {}, {}) already exists", + row.o_w_id, + row.o_d_id, + row.o_id + ); + tx.db.oorder().insert(row); + Ok(()) +} + +fn insert_new_order_checked_tx(tx: &spacetimedb::TxContext, row: NewOrder) -> Result<(), String> { + validate_new_order_row(&row)?; + ensure!( + tx.db + .new_order_row() + .by_w_d_o_id() + .filter((row.no_w_id, row.no_d_id, row.no_o_id)) + .next() + .is_none(), + "new-order ({}, {}, {}) already exists", + row.no_w_id, + row.no_d_id, + row.no_o_id + ); + tx.db.new_order_row().insert(row); + Ok(()) +} + +fn insert_order_line_checked_tx(tx: &spacetimedb::TxContext, row: OrderLine) -> Result<(), String> { + validate_order_line_row(&row)?; + ensure!( + tx.db + .order_line() + .by_w_d_o_number() + .filter((row.ol_w_id, row.ol_d_id, row.ol_o_id, row.ol_number)) + .next() + .is_none(), + "order-line ({}, {}, {}, {}) already exists", + row.ol_w_id, + row.ol_d_id, + row.ol_o_id, + row.ol_number + ); + tx.db.order_line().insert(row); + Ok(()) +} + +fn new_order_tx( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, +) -> Result { + ensure!( + (1..=DISTRICTS_PER_WAREHOUSE).contains(&d_id), + "district id out of range" + ); + ensure!( + (5..=15).contains(&order_lines.len()), + "new-order requires between 5 and 15 order lines" + ); + + let warehouse = find_warehouse(tx, w_id)?; + let district = find_district(tx, w_id, d_id)?; + let customer = find_customer_by_id(tx, w_id, d_id, c_id)?; + + let mut 
touched_items = Vec::with_capacity(order_lines.len()); + let mut all_local = true; + for line in &order_lines { + ensure!(line.quantity > 0, "order line quantity must be positive"); + let item = find_item(tx, line.item_id)?; + let stock = find_stock(tx, line.supply_w_id, line.item_id)?; + if line.supply_w_id != w_id { + all_local = false; + } + touched_items.push((line.clone(), item, stock)); + } + + let order_id = district.d_next_o_id; + + replace_district_tx( + tx, + district.clone(), + District { + d_next_o_id: district.d_next_o_id + 1, + ..district.clone() + }, + )?; + + insert_order_checked_tx( + tx, + OOrder { + o_w_id: w_id, + o_d_id: d_id, + o_id: order_id, + o_c_id: c_id, + o_entry_d: tx.timestamp, + o_carrier_id: None, + o_ol_cnt: order_lines.len() as u8, + o_all_local: all_local, + }, + )?; + + insert_new_order_checked_tx( + tx, + NewOrder { + no_w_id: w_id, + no_d_id: d_id, + no_o_id: order_id, + }, + )?; + + let mut line_results = Vec::with_capacity(touched_items.len()); + let mut subtotal_cents = 0i64; + for (idx, (line, item, stock)) in touched_items.into_iter().enumerate() { + let updated_stock_quantity = adjust_stock_quantity(stock.s_quantity, line.quantity as i32); + replace_stock_tx( + tx, + stock.clone(), + Stock { + s_quantity: updated_stock_quantity, + s_ytd: stock.s_ytd + u64::from(line.quantity), + s_order_cnt: stock.s_order_cnt + 1, + s_remote_cnt: stock.s_remote_cnt + u32::from(line.supply_w_id != w_id), + ..stock.clone() + }, + )?; + + let line_amount_cents = item.i_price_cents * i64::from(line.quantity); + subtotal_cents += line_amount_cents; + let dist_info = district_stock_info(&stock, d_id); + insert_order_line_checked_tx( + tx, + OrderLine { + ol_w_id: w_id, + ol_d_id: d_id, + ol_o_id: order_id, + ol_number: (idx + 1) as u8, + ol_i_id: line.item_id, + ol_supply_w_id: line.supply_w_id, + ol_delivery_d: None, + ol_quantity: line.quantity, + ol_amount_cents: line_amount_cents, + ol_dist_info: dist_info, + }, + )?; + + let brand_generic 
= if contains_original(&item.i_data) && contains_original(&stock.s_data) { + "B" + } else { + "G" + }; + line_results.push(NewOrderLineResult { + item_id: item.i_id, + item_name: item.i_name, + supply_w_id: line.supply_w_id, + quantity: line.quantity, + stock_quantity: updated_stock_quantity, + item_price_cents: item.i_price_cents, + amount_cents: line_amount_cents, + brand_generic: brand_generic.to_string(), + }); + } + + let taxed = apply_tax( + subtotal_cents, + i64::from(warehouse.w_tax_bps) + i64::from(district.d_tax_bps), + ); + let total_amount_cents = apply_discount(taxed, i64::from(customer.c_discount_bps)); + + Ok(NewOrderResult { + warehouse_tax_bps: warehouse.w_tax_bps, + district_tax_bps: district.d_tax_bps, + customer_discount_bps: customer.c_discount_bps, + customer_last: customer.c_last, + customer_credit: customer.c_credit, + order_id, + entry_d: tx.timestamp, + total_amount_cents, + all_local, + lines: line_results, + }) +} + +fn payment_tx(tx: &spacetimedb::TxContext, req: PaymentRequest<'_>) -> Result { + ensure!(req.payment_amount_cents > 0, "payment amount must be positive"); + + let warehouse = find_warehouse(tx, req.w_id)?; + let district = find_district(tx, req.w_id, req.d_id)?; + let customer = resolve_customer(tx, req.c_w_id, req.c_d_id, req.customer_selector)?; + + tx.db.warehouse().w_id().update(Warehouse { + w_ytd_cents: warehouse.w_ytd_cents + req.payment_amount_cents, + ..warehouse.clone() + }); + + replace_district_tx( + tx, + district.clone(), + District { + d_ytd_cents: district.d_ytd_cents + req.payment_amount_cents, + ..district.clone() + }, + )?; + + let mut updated_customer = Customer { + c_balance_cents: customer.c_balance_cents - req.payment_amount_cents, + c_ytd_payment_cents: customer.c_ytd_payment_cents + req.payment_amount_cents, + c_payment_cnt: customer.c_payment_cnt + 1, + ..customer.clone() + }; + + if updated_customer.c_credit == "BC" { + let prefix = format!( + "{} {} {} {} {} {} {}|", + updated_customer.c_id, + 
updated_customer.c_d_id, + updated_customer.c_w_id, + req.d_id, + req.w_id, + req.payment_amount_cents, + req.now.to_micros_since_unix_epoch() + ); + updated_customer.c_data = format!("{prefix}{}", updated_customer.c_data); + updated_customer.c_data.truncate(MAX_C_DATA_LEN); + } + + replace_customer_tx(tx, customer.clone(), updated_customer.clone())?; + + tx.db.history().insert(History { + history_id: 0, + h_c_id: updated_customer.c_id, + h_c_d_id: updated_customer.c_d_id, + h_c_w_id: updated_customer.c_w_id, + h_d_id: req.d_id, + h_w_id: req.w_id, + h_date: req.now, + h_amount_cents: req.payment_amount_cents, + h_data: format!("{} {}", warehouse.w_name, district.d_name), + }); + + Ok(PaymentResult { + warehouse_name: warehouse.w_name, + district_name: district.d_name, + customer_id: updated_customer.c_id, + customer_first: updated_customer.c_first, + customer_middle: updated_customer.c_middle, + customer_last: updated_customer.c_last, + customer_balance_cents: updated_customer.c_balance_cents, + customer_credit: updated_customer.c_credit.clone(), + customer_discount_bps: updated_customer.c_discount_bps, + payment_amount_cents: req.payment_amount_cents, + customer_data: if updated_customer.c_credit == "BC" { + Some(updated_customer.c_data) + } else { + None + }, + }) +} + +fn order_status_tx( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + customer_selector: &CustomerSelector, +) -> Result { + let customer = resolve_customer(tx, w_id, d_id, customer_selector)?; + + let mut latest_order: Option = None; + for row in tx + .db + .oorder() + .by_w_d_c_o_id() + .filter((w_id, d_id, customer.c_id, 0u32..)) + { + latest_order = Some(row); + } + + let mut lines = Vec::new(); + if let Some(order) = &latest_order { + for line in tx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, order.o_id, 0u8..)) + { + lines.push(OrderStatusLineResult { + item_id: line.ol_i_id, + supply_w_id: line.ol_supply_w_id, + quantity: line.ol_quantity, + amount_cents: 
line.ol_amount_cents, + delivery_d: line.ol_delivery_d, + }); + } + } + + Ok(OrderStatusResult { + customer_id: customer.c_id, + customer_first: customer.c_first, + customer_middle: customer.c_middle, + customer_last: customer.c_last, + customer_balance_cents: customer.c_balance_cents, + order_id: latest_order.as_ref().map(|row| row.o_id), + order_entry_d: latest_order.as_ref().map(|row| row.o_entry_d), + carrier_id: latest_order.as_ref().and_then(|row| row.o_carrier_id), + lines, + }) +} + +fn stock_level_tx( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + threshold: i32, +) -> Result { + let district = find_district(tx, w_id, d_id)?; + let start_o_id = district.d_next_o_id.saturating_sub(20); + let end_o_id = district.d_next_o_id; + + let mut item_ids = BTreeSet::new(); + for line in tx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, start_o_id..end_o_id)) + { + item_ids.insert(line.ol_i_id); + } + + let mut low_stock_count = 0u32; + for item_id in item_ids { + let stock = find_stock(tx, w_id, item_id)?; + if stock.s_quantity < threshold { + low_stock_count += 1; + } + } + + Ok(StockLevelResult { + warehouse_id: w_id, + district_id: d_id, + threshold, + low_stock_count, + }) +} + +fn process_delivery_district( + ctx: &ReducerContext, + w_id: u16, + d_id: u8, + carrier_id: u8, + delivered_at: Timestamp, +) -> Result { + let maybe_new_order = ctx.db.new_order_row().by_w_d_o_id().filter((w_id, d_id, 0u32..)).next(); + let Some(new_order) = maybe_new_order else { + return Ok(false); + }; + + let order = find_order_by_id_reducer(ctx, w_id, d_id, new_order.no_o_id)?; + + ctx.db.new_order_row().delete(new_order); + replace_order_reducer( + ctx, + order.clone(), + OOrder { + o_carrier_id: Some(carrier_id), + ..order.clone() + }, + )?; + + let mut total_amount_cents = 0i64; + let order_lines: Vec<_> = ctx + .db + .order_line() + .by_w_d_o_number() + .filter((w_id, d_id, order.o_id, 0u8..)) + .collect(); + for line in order_lines { + 
total_amount_cents += line.ol_amount_cents; + replace_order_line_reducer( + ctx, + line.clone(), + OrderLine { + ol_delivery_d: Some(delivered_at), + ..line + }, + )?; + } + + let customer = find_customer_by_id_reducer(ctx, w_id, d_id, order.o_c_id)?; + replace_customer_reducer( + ctx, + customer.clone(), + Customer { + c_balance_cents: customer.c_balance_cents + total_amount_cents, + c_delivery_cnt: customer.c_delivery_cnt + 1, + ..customer + }, + )?; + + Ok(true) +} + +fn resolve_customer( + tx: &spacetimedb::TxContext, + w_id: u16, + d_id: u8, + selector: &CustomerSelector, +) -> Result { + match selector { + CustomerSelector::ById(id) => find_customer_by_id(tx, w_id, d_id, *id), + CustomerSelector::ByLastName(last_name) => { + let rows: Vec<_> = tx + .db + .customer() + .by_w_d_last_first_id() + .filter((w_id, d_id, last_name.as_str(), ""..)) + .collect(); + ensure!(!rows.is_empty(), "customer not found"); + Ok(rows[(rows.len() - 1) / 2].clone()) + } + } +} + +fn find_warehouse(tx: &spacetimedb::TxContext, w_id: u16) -> Result { + tx.db + .warehouse() + .w_id() + .find(w_id) + .ok_or_else(|| format!("warehouse {w_id} not found")) +} + +fn ensure_warehouse_exists(tx: &spacetimedb::TxContext, w_id: u16) -> Result<(), String> { + find_warehouse(tx, w_id).map(|_| ()) +} + +fn find_district(tx: &spacetimedb::TxContext, w_id: u16, d_id: u8) -> Result { + tx.db + .district() + .by_w_d() + .filter((w_id, d_id)) + .next() + .ok_or_else(|| format!("district ({w_id}, {d_id}) not found")) +} + +fn find_customer_by_id(tx: &spacetimedb::TxContext, w_id: u16, d_id: u8, c_id: u32) -> Result { + tx.db + .customer() + .by_w_d_c_id() + .filter((w_id, d_id, c_id)) + .next() + .ok_or_else(|| format!("customer ({w_id}, {d_id}, {c_id}) not found")) +} + +fn find_customer_by_id_reducer(ctx: &ReducerContext, w_id: u16, d_id: u8, c_id: u32) -> Result { + ctx.db + .customer() + .by_w_d_c_id() + .filter((w_id, d_id, c_id)) + .next() + .ok_or_else(|| format!("customer ({w_id}, {d_id}, 
{c_id}) not found")) +} + +fn find_order_by_id_reducer(ctx: &ReducerContext, w_id: u16, d_id: u8, o_id: u32) -> Result { + ctx.db + .oorder() + .by_w_d_o_id() + .filter((w_id, d_id, o_id)) + .next() + .ok_or_else(|| format!("order ({w_id}, {d_id}, {o_id}) not found")) +} + +fn find_item(tx: &spacetimedb::TxContext, item_id: u32) -> Result { + tx.db + .item() + .i_id() + .find(item_id) + .ok_or_else(|| format!("item {item_id} not found")) +} + +fn find_stock(tx: &spacetimedb::TxContext, w_id: u16, item_id: u32) -> Result { + tx.db + .stock() + .by_w_i() + .filter((w_id, item_id)) + .next() + .ok_or_else(|| format!("stock ({w_id}, {item_id}) not found")) +} + +fn replace_district_tx(tx: &spacetimedb::TxContext, old: District, new: District) -> Result<(), String> { + ensure!( + old.d_w_id == new.d_w_id && old.d_id == new.d_id, + "district identity cannot change during update" + ); + tx.db.district().delete(old); + tx.db.district().insert(new); + Ok(()) +} + +fn replace_customer_tx(tx: &spacetimedb::TxContext, old: Customer, new: Customer) -> Result<(), String> { + ensure!( + old.c_w_id == new.c_w_id && old.c_d_id == new.c_d_id && old.c_id == new.c_id, + "customer identity cannot change during update" + ); + tx.db.customer().delete(old); + tx.db.customer().insert(new); + Ok(()) +} + +fn replace_stock_tx(tx: &spacetimedb::TxContext, old: Stock, new: Stock) -> Result<(), String> { + ensure!( + old.s_w_id == new.s_w_id && old.s_i_id == new.s_i_id, + "stock identity cannot change during update" + ); + tx.db.stock().delete(old); + tx.db.stock().insert(new); + Ok(()) +} + +fn replace_customer_reducer(ctx: &ReducerContext, old: Customer, new: Customer) -> Result<(), String> { + ensure!( + old.c_w_id == new.c_w_id && old.c_d_id == new.c_d_id && old.c_id == new.c_id, + "customer identity cannot change during update" + ); + ctx.db.customer().delete(old); + ctx.db.customer().insert(new); + Ok(()) +} + +fn replace_order_reducer(ctx: &ReducerContext, old: OOrder, new: OOrder) -> 
Result<(), String> { + ensure!( + old.o_w_id == new.o_w_id && old.o_d_id == new.o_d_id && old.o_id == new.o_id, + "order identity cannot change during update" + ); + ctx.db.oorder().delete(old); + ctx.db.oorder().insert(new); + Ok(()) +} + +fn replace_order_line_reducer(ctx: &ReducerContext, old: OrderLine, new: OrderLine) -> Result<(), String> { + ensure!( + old.ol_w_id == new.ol_w_id + && old.ol_d_id == new.ol_d_id + && old.ol_o_id == new.ol_o_id + && old.ol_number == new.ol_number, + "order-line identity cannot change during update" + ); + ctx.db.order_line().delete(old); + ctx.db.order_line().insert(new); + Ok(()) +} + +fn district_stock_info(stock: &Stock, d_id: u8) -> String { + match d_id { + 1 => stock.s_dist_01.clone(), + 2 => stock.s_dist_02.clone(), + 3 => stock.s_dist_03.clone(), + 4 => stock.s_dist_04.clone(), + 5 => stock.s_dist_05.clone(), + 6 => stock.s_dist_06.clone(), + 7 => stock.s_dist_07.clone(), + 8 => stock.s_dist_08.clone(), + 9 => stock.s_dist_09.clone(), + 10 => stock.s_dist_10.clone(), + _ => String::new(), + } +} + +fn contains_original(data: &str) -> bool { + data.contains("ORIGINAL") +} + +fn adjust_stock_quantity(current_quantity: i32, ordered_quantity: i32) -> i32 { + if current_quantity - ordered_quantity >= 10 { + current_quantity - ordered_quantity + } else { + current_quantity - ordered_quantity + 91 + } +} + +fn apply_tax(amount_cents: i64, total_tax_bps: i64) -> i64 { + amount_cents * (TAX_SCALE + total_tax_bps) / TAX_SCALE +} + +fn apply_discount(amount_cents: i64, discount_bps: i64) -> i64 { + amount_cents * (TAX_SCALE - discount_bps) / TAX_SCALE +} + +fn as_delivery_completion_view(row: DeliveryCompletion) -> DeliveryCompletionView { + DeliveryCompletionView { + completion_id: row.completion_id, + run_id: row.run_id, + driver_id: row.driver_id, + terminal_id: row.terminal_id, + request_id: row.request_id, + warehouse_id: row.warehouse_id, + carrier_id: row.carrier_id, + queued_at: row.queued_at, + completed_at: 
row.completed_at, + skipped_districts: row.skipped_districts, + processed_districts: row.processed_districts, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn middle_customer_selection_uses_lower_middle_for_even_count() { + let idx = (4usize - 1) / 2; + assert_eq!(idx, 1); + } + + #[test] + fn stock_quantity_wraps_like_tpcc() { + assert_eq!(adjust_stock_quantity(20, 5), 15); + assert_eq!(adjust_stock_quantity(10, 5), 96); + } +} diff --git a/tools/tpcc-runner/Cargo.toml b/tools/tpcc-runner/Cargo.toml new file mode 100644 index 00000000000..fdad0563564 --- /dev/null +++ b/tools/tpcc-runner/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "tpcc-runner" +version = "0.1.0" +edition.workspace = true + +[[bin]] +name = "tpcc-runner" +path = "src/main.rs" + +[dependencies] +anyhow.workspace = true +axum.workspace = true +clap.workspace = true +env_logger.workspace = true +log.workspace = true +parking_lot.workspace = true +rand.workspace = true +reqwest.workspace = true +serde.workspace = true +serde_json.workspace = true +tokio.workspace = true +toml.workspace = true + +spacetimedb-sdk = { path = "../../sdks/rust" } + +[lints] +workspace = true diff --git a/tools/tpcc-runner/README.md b/tools/tpcc-runner/README.md new file mode 100644 index 00000000000..93a7106718e --- /dev/null +++ b/tools/tpcc-runner/README.md @@ -0,0 +1,95 @@ +# TPC-C Runner + +`tpcc-runner` is the Rust-side harness for the SpacetimeDB TPC-C module in `modules/tpcc`. + +It supports three subcommands: + +- `load`: populate the module with the initial TPC-C dataset +- `driver`: run one benchmark driver with one logical terminal per SDK connection +- `coordinator`: synchronize multiple remote drivers and aggregate their summaries + +## Local workflow + +1. Publish or start the `modules/tpcc` module. +2. Load data: + +```bash +cargo run -p tpcc-runner -- load --database tpcc --warehouses 1 +``` + +3. 
Run a single local driver: + +```bash +cargo run -p tpcc-runner -- driver --database tpcc --warehouses 1 --terminals 10 --warmup-secs 5 --measure-secs 30 +``` + +The driver writes: + +- `summary.json` +- `txn_events.ndjson` + +under `tpcc-results/<run_id>/<driver_id>/` unless `--output-dir` is provided. + +## Distributed workflow + +Start the coordinator: + +```bash +cargo run -p tpcc-runner -- coordinator --expected-drivers 2 --warmup-secs 5 --measure-secs 30 +``` + +Start each remote driver with disjoint terminal ranges: + +```bash +cargo run -p tpcc-runner -- driver --database tpcc --warehouses 2 --terminal-start 1 --terminals 10 --coordinator-url http://coordinator-host:7878 +cargo run -p tpcc-runner -- driver --database tpcc --warehouses 2 --terminal-start 11 --terminals 10 --coordinator-url http://coordinator-host:7878 +``` + +When all expected drivers register, the coordinator publishes a common schedule and writes an aggregated `summary.json` under `tpcc-results/coordinator/<run_id>/`. + +## Config file + +All subcommands accept `--config <path>`. The file is TOML with optional sections: + +```toml +[connection] +uri = "http://127.0.0.1:3000" +database = "tpcc" +confirmed_reads = true +timeout_secs = 30 + +[load] +warehouses = 1 +batch_size = 500 +reset = true + +[driver] +driver_id = "driver-a" +terminal_start = 1 +terminals = 10 +warehouses = 1 +warmup_secs = 5 +measure_secs = 30 +delivery_wait_secs = 60 +keying_time_scale = 1.0 +think_time_scale = 1.0 + +[coordinator] +run_id = "tpcc-demo" +listen = "127.0.0.1:7878" +expected_drivers = 2 +warmup_secs = 5 +measure_secs = 30 +output_dir = "tpcc-results/coordinator" +``` + +CLI flags override config-file values. 
+ +## Regenerating bindings + +If the module signatures change, regenerate the Rust SDK bindings: + +```bash +cargo build -p spacetimedb-standalone +cargo run -p spacetimedb-cli -- generate --lang rust --out-dir tools/tpcc-runner/src/module_bindings --module-path modules/tpcc --yes +``` diff --git a/tools/tpcc-runner/src/client.rs b/tools/tpcc-runner/src/client.rs new file mode 100644 index 00000000000..102a9ebe01c --- /dev/null +++ b/tools/tpcc-runner/src/client.rs @@ -0,0 +1,340 @@ +use anyhow::{anyhow, bail, Context, Result}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use crate::config::ConnectionConfig; +use crate::module_bindings::*; +use spacetimedb_sdk::DbContext; +use tokio::sync::oneshot; + +pub struct ModuleClient { + conn: DbConnection, + timeout: Duration, +} + +impl ModuleClient { + pub async fn connect(config: &ConnectionConfig) -> Result { + let (ready_tx, ready_rx) = oneshot::channel(); + let ready_tx = Arc::new(Mutex::new(Some(ready_tx))); + let success_tx = Arc::clone(&ready_tx); + let error_tx = Arc::clone(&ready_tx); + let mut builder = DbConnection::builder() + .with_uri(config.uri.clone()) + .with_database_name(config.database.clone()) + .with_confirmed_reads(config.confirmed_reads) + .on_connect(move |_, _, _| { + if let Some(tx) = success_tx.lock().expect("ready mutex poisoned").take() { + let _ = tx.send(Ok::<(), anyhow::Error>(())); + } + }) + .on_connect_error(move |_, error| { + if let Some(tx) = error_tx.lock().expect("ready mutex poisoned").take() { + let _ = tx.send(Err(anyhow!("connection failed: {error}"))); + } + }); + + if let Some(token) = &config.token { + builder = builder.with_token(Some(token.clone())); + } + + let conn = builder.build().context("failed to build database connection")?; + Self::await_with_conn(&conn, Duration::from_secs(config.timeout_secs), "connection", ready_rx).await??; + + Ok(Self { + conn, + timeout: Duration::from_secs(config.timeout_secs), + }) + } + + pub async fn reset_tpcc(&self) -> 
Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.reset_tpcc_then(move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("reset_tpcc", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("reset_tpcc failed: {}", message), + Err(err) => Err(anyhow!("reset_tpcc internal error: {}", err)), + } + } + + pub async fn load_warehouses(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_warehouses_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_warehouses", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_warehouses failed: {}", message), + Err(err) => Err(anyhow!("load_warehouses internal error: {}", err)), + } + } + + pub async fn load_districts(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_districts_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_districts", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_districts failed: {}", message), + Err(err) => Err(anyhow!("load_districts internal error: {}", err)), + } + } + + pub async fn load_customers(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_customers_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_customers", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_customers failed: {}", message), + Err(err) => Err(anyhow!("load_customers internal error: {}", err)), + } + } + + pub async fn load_history(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_history_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_history", rx).await? 
{ + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_history failed: {}", message), + Err(err) => Err(anyhow!("load_history internal error: {}", err)), + } + } + + pub async fn load_items(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_items_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_items", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_items failed: {}", message), + Err(err) => Err(anyhow!("load_items internal error: {}", err)), + } + } + + pub async fn load_stocks(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_stocks_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_stocks", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_stocks failed: {}", message), + Err(err) => Err(anyhow!("load_stocks internal error: {}", err)), + } + } + + pub async fn load_orders(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_orders_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_orders", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_orders failed: {}", message), + Err(err) => Err(anyhow!("load_orders internal error: {}", err)), + } + } + + pub async fn load_new_orders(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_new_orders_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_new_orders", rx).await? 
{ + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_new_orders failed: {}", message), + Err(err) => Err(anyhow!("load_new_orders internal error: {}", err)), + } + } + + pub async fn load_order_lines(&self, rows: Vec) -> Result<()> { + let (tx, rx) = oneshot::channel(); + self.conn.reducers.load_order_lines_then(rows, move |_, res| { + let _ = tx.send(res); + })?; + match self.await_result("load_order_lines", rx).await? { + Ok(Ok(())) => Ok(()), + Ok(Err(message)) => bail!("load_order_lines failed: {}", message), + Err(err) => Err(anyhow!("load_order_lines internal error: {}", err)), + } + } + + pub async fn new_order( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + ) -> Result> { + let (tx, rx) = oneshot::channel(); + self.conn + .procedures + .new_order_then(w_id, d_id, c_id, order_lines, move |_, res| { + let _ = tx.send(res); + }); + match self.await_result("new_order", rx).await? { + Ok(value) => Ok(value), + Err(err) => Err(anyhow!("new_order internal error: {}", err)), + } + } + + pub async fn payment( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + ) -> Result> { + let (tx, rx) = oneshot::channel(); + self.conn.procedures.payment_then( + w_id, + d_id, + c_w_id, + c_d_id, + customer, + payment_amount_cents, + move |_, res| { + let _ = tx.send(res); + }, + ); + match self.await_result("payment", rx).await? { + Ok(value) => Ok(value), + Err(err) => Err(anyhow!("payment internal error: {}", err)), + } + } + + pub async fn order_status( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + ) -> Result> { + let (tx, rx) = oneshot::channel(); + self.conn + .procedures + .order_status_then(w_id, d_id, customer, move |_, res| { + let _ = tx.send(res); + }); + match self.await_result("order_status", rx).await? 
{ + Ok(value) => Ok(value), + Err(err) => Err(anyhow!("order_status internal error: {}", err)), + } + } + + pub async fn stock_level(&self, w_id: u16, d_id: u8, threshold: i32) -> Result> { + let (tx, rx) = oneshot::channel(); + self.conn + .procedures + .stock_level_then(w_id, d_id, threshold, move |_, res| { + let _ = tx.send(res); + }); + match self.await_result("stock_level", rx).await? { + Ok(value) => Ok(value), + Err(err) => Err(anyhow!("stock_level internal error: {}", err)), + } + } + + pub async fn queue_delivery( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + ) -> Result> { + let (tx, rx) = oneshot::channel(); + self.conn.procedures.queue_delivery_then( + run_id, + driver_id, + terminal_id, + request_id, + w_id, + carrier_id, + move |_, res| { + let _ = tx.send(res); + }, + ); + match self.await_result("queue_delivery", rx).await? { + Ok(value) => Ok(value), + Err(err) => Err(anyhow!("queue_delivery internal error: {}", err)), + } + } + + pub async fn delivery_progress(&self, run_id: String) -> Result> { + let (tx, rx) = oneshot::channel(); + self.conn.procedures.delivery_progress_then(run_id, move |_, res| { + let _ = tx.send(res); + }); + match self.await_result("delivery_progress", rx).await? { + Ok(value) => Ok(value), + Err(err) => Err(anyhow!("delivery_progress internal error: {}", err)), + } + } + + pub async fn fetch_delivery_completions( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + ) -> Result, String>> { + let (tx, rx) = oneshot::channel(); + self.conn + .procedures + .fetch_delivery_completions_then(run_id, after_completion_id, limit, move |_, res| { + let _ = tx.send(res); + }); + match self.await_result("fetch_delivery_completions", rx).await? 
{ + Ok(value) => Ok(value), + Err(err) => Err(anyhow!("fetch_delivery_completions internal error: {}", err)), + } + } + + pub async fn shutdown(self) { + if self.conn.is_active() && self.conn.disconnect().is_ok() { + let _ = tokio::time::timeout(self.timeout, self.conn.advance_one_message_async()).await; + } + } + + async fn await_result(&self, operation: &str, rx: oneshot::Receiver) -> Result { + Self::await_with_conn(&self.conn, self.timeout, operation, rx).await + } + + async fn await_with_conn( + conn: &DbConnection, + timeout: Duration, + operation: &str, + mut rx: oneshot::Receiver, + ) -> Result { + tokio::time::timeout(timeout, async { + loop { + tokio::select! { + result = &mut rx => { + return result.map_err(|_| anyhow!("{operation} callback dropped")); + } + message = conn.advance_one_message_async() => { + message.with_context(|| format!("{operation} connection loop failed"))?; + } + } + } + }) + .await + .with_context(|| format!("timed out waiting for {operation}"))? + } +} + +pub fn expect_ok(operation: &str, result: Result>) -> Result { + match result? 
{ + Ok(value) => Ok(value), + Err(message) => bail!("{} failed: {}", operation, message), + } +} diff --git a/tools/tpcc-runner/src/config.rs b/tools/tpcc-runner/src/config.rs new file mode 100644 index 00000000000..4042cb85f59 --- /dev/null +++ b/tools/tpcc-runner/src/config.rs @@ -0,0 +1,306 @@ +use anyhow::{bail, Context, Result}; +use clap::{Args, Parser, Subcommand}; +use serde::Deserialize; +use std::fs; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; + +#[derive(Debug, Parser)] +#[command(name = "tpcc-runner")] +pub struct Cli { + #[arg(long)] + pub config: Option, + #[command(subcommand)] + pub command: Command, +} + +#[derive(Debug, Subcommand)] +pub enum Command { + Load(LoadArgs), + Driver(DriverArgs), + Coordinator(CoordinatorArgs), +} + +#[derive(Debug, Clone)] +pub struct ConnectionConfig { + pub uri: String, + pub database: String, + pub token: Option, + pub confirmed_reads: bool, + pub timeout_secs: u64, +} + +#[derive(Debug, Clone)] +pub struct LoadConfig { + pub connection: ConnectionConfig, + pub warehouses: u16, + pub batch_size: usize, + pub reset: bool, +} + +#[derive(Debug, Clone)] +pub struct DriverConfig { + pub connection: ConnectionConfig, + pub run_id: Option, + pub driver_id: String, + pub terminal_start: u32, + pub terminals: u32, + pub warehouse_count: u16, + pub warmup_secs: u64, + pub measure_secs: u64, + pub output_dir: Option, + pub coordinator_url: Option, + pub delivery_wait_secs: u64, + pub keying_time_scale: f64, + pub think_time_scale: f64, +} + +#[derive(Debug, Clone)] +pub struct CoordinatorConfig { + pub run_id: String, + pub listen: SocketAddr, + pub expected_drivers: usize, + pub warmup_secs: u64, + pub measure_secs: u64, + pub output_dir: PathBuf, +} + +#[derive(Debug, Clone, Args)] +pub struct LoadArgs { + #[command(flatten)] + pub connection: ConnectionArgs, + #[arg(long)] + pub warehouses: Option, + #[arg(long)] + pub batch_size: Option, + #[arg(long)] + pub reset: Option, +} + +#[derive(Debug, Clone, 
Args)] +pub struct DriverArgs { + #[command(flatten)] + pub connection: ConnectionArgs, + #[arg(long)] + pub run_id: Option, + #[arg(long)] + pub driver_id: Option, + #[arg(long)] + pub terminal_start: Option, + #[arg(long)] + pub terminals: Option, + #[arg(long)] + pub warehouses: Option, + #[arg(long)] + pub warmup_secs: Option, + #[arg(long)] + pub measure_secs: Option, + #[arg(long)] + pub output_dir: Option, + #[arg(long)] + pub coordinator_url: Option, + #[arg(long)] + pub delivery_wait_secs: Option, + #[arg(long)] + pub keying_time_scale: Option, + #[arg(long)] + pub think_time_scale: Option, +} + +#[derive(Debug, Clone, Args)] +pub struct CoordinatorArgs { + #[arg(long)] + pub run_id: Option, + #[arg(long)] + pub listen: Option, + #[arg(long)] + pub expected_drivers: Option, + #[arg(long)] + pub warmup_secs: Option, + #[arg(long)] + pub measure_secs: Option, + #[arg(long)] + pub output_dir: Option, +} + +#[derive(Debug, Clone, Default, Args)] +pub struct ConnectionArgs { + #[arg(long)] + pub uri: Option, + #[arg(long)] + pub database: Option, + #[arg(long)] + pub token: Option, + #[arg(long)] + pub confirmed_reads: Option, + #[arg(long)] + pub timeout_secs: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct FileConfig { + #[serde(default)] + connection: FileConnectionConfig, + #[serde(default)] + load: FileLoadConfig, + #[serde(default)] + driver: FileDriverConfig, + #[serde(default)] + coordinator: FileCoordinatorConfig, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct FileConnectionConfig { + uri: Option, + database: Option, + token: Option, + confirmed_reads: Option, + timeout_secs: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct FileLoadConfig { + warehouses: Option, + batch_size: Option, + reset: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct FileDriverConfig { + run_id: Option, + driver_id: Option, + terminal_start: Option, + terminals: Option, + warehouses: Option, + 
warmup_secs: Option, + measure_secs: Option, + output_dir: Option, + coordinator_url: Option, + delivery_wait_secs: Option, + keying_time_scale: Option, + think_time_scale: Option, +} + +#[derive(Debug, Clone, Default, Deserialize)] +struct FileCoordinatorConfig { + run_id: Option, + listen: Option, + expected_drivers: Option, + warmup_secs: Option, + measure_secs: Option, + output_dir: Option, +} + +impl FileConfig { + pub fn load(path: Option<&Path>) -> Result { + let Some(path) = path else { + return Ok(Self::default()); + }; + let raw = fs::read_to_string(path).with_context(|| format!("failed to read config {}", path.display()))?; + toml::from_str(&raw).with_context(|| format!("failed to parse config {}", path.display())) + } +} + +impl ConnectionArgs { + fn resolve(&self, file: &FileConnectionConfig) -> ConnectionConfig { + ConnectionConfig { + uri: self + .uri + .clone() + .or_else(|| file.uri.clone()) + .unwrap_or_else(|| "http://127.0.0.1:3000".to_string()), + database: self + .database + .clone() + .or_else(|| file.database.clone()) + .unwrap_or_else(|| "tpcc".to_string()), + token: self.token.clone().or_else(|| file.token.clone()), + confirmed_reads: self.confirmed_reads.or(file.confirmed_reads).unwrap_or(true), + timeout_secs: self.timeout_secs.or(file.timeout_secs).unwrap_or(30), + } + } +} + +impl LoadArgs { + pub fn resolve(&self, file: &FileConfig) -> LoadConfig { + LoadConfig { + connection: self.connection.resolve(&file.connection), + warehouses: self.warehouses.or(file.load.warehouses).unwrap_or(1), + batch_size: self.batch_size.or(file.load.batch_size).unwrap_or(500), + reset: self.reset.or(file.load.reset).unwrap_or(true), + } + } +} + +impl DriverArgs { + pub fn resolve(&self, file: &FileConfig) -> Result { + let connection = self.connection.resolve(&file.connection); + let warehouse_count = self.warehouses.or(file.driver.warehouses).unwrap_or(1); + let terminals = self + .terminals + .or(file.driver.terminals) + 
.unwrap_or(u32::from(warehouse_count) * 10); + let terminal_start = self.terminal_start.or(file.driver.terminal_start).unwrap_or(1); + if terminals == 0 { + bail!("terminal count must be positive"); + } + Ok(DriverConfig { + connection, + run_id: self.run_id.clone().or_else(|| file.driver.run_id.clone()), + driver_id: self + .driver_id + .clone() + .or_else(|| file.driver.driver_id.clone()) + .unwrap_or_else(default_driver_id), + terminal_start, + terminals, + warehouse_count, + warmup_secs: self.warmup_secs.or(file.driver.warmup_secs).unwrap_or(5), + measure_secs: self.measure_secs.or(file.driver.measure_secs).unwrap_or(30), + output_dir: self.output_dir.clone().or_else(|| file.driver.output_dir.clone()), + coordinator_url: self + .coordinator_url + .clone() + .or_else(|| file.driver.coordinator_url.clone()), + delivery_wait_secs: self.delivery_wait_secs.or(file.driver.delivery_wait_secs).unwrap_or(60), + keying_time_scale: self.keying_time_scale.or(file.driver.keying_time_scale).unwrap_or(1.0), + think_time_scale: self.think_time_scale.or(file.driver.think_time_scale).unwrap_or(1.0), + }) + } +} + +impl CoordinatorArgs { + pub fn resolve(&self, file: &FileConfig) -> Result { + let expected_drivers = self.expected_drivers.or(file.coordinator.expected_drivers).unwrap_or(1); + if expected_drivers == 0 { + bail!("expected_drivers must be positive"); + } + Ok(CoordinatorConfig { + run_id: self + .run_id + .clone() + .or_else(|| file.coordinator.run_id.clone()) + .unwrap_or_else(default_run_id), + listen: self + .listen + .or(file.coordinator.listen) + .unwrap_or_else(|| "127.0.0.1:7878".parse().expect("hard-coded coordinator address")), + expected_drivers, + warmup_secs: self.warmup_secs.or(file.coordinator.warmup_secs).unwrap_or(5), + measure_secs: self.measure_secs.or(file.coordinator.measure_secs).unwrap_or(30), + output_dir: self + .output_dir + .clone() + .or_else(|| file.coordinator.output_dir.clone()) + .unwrap_or_else(|| 
PathBuf::from("tpcc-results/coordinator")), + }) + } +} + +pub fn default_run_id() -> String { + format!("tpcc-{}", crate::summary::now_millis()) +} + +pub fn default_driver_id() -> String { + format!("driver-{}", std::process::id()) +} diff --git a/tools/tpcc-runner/src/coordinator.rs b/tools/tpcc-runner/src/coordinator.rs new file mode 100644 index 00000000000..f9060f5520d --- /dev/null +++ b/tools/tpcc-runner/src/coordinator.rs @@ -0,0 +1,125 @@ +use anyhow::{Context, Result}; +use axum::extract::State; +use axum::routing::{get, post}; +use axum::{Json, Router}; +use parking_lot::Mutex; +use std::collections::BTreeMap; +use std::fs; +use std::path::Path; +use std::sync::Arc; + +use crate::config::CoordinatorConfig; +use crate::protocol::{ + RegisterDriverRequest, RegisterDriverResponse, RunSchedule, ScheduleResponse, SubmitSummaryRequest, +}; +use crate::summary::{aggregate_summaries, now_millis, write_json, AggregateSummary, DriverSummary}; + +#[derive(Clone)] +struct AppState { + inner: Arc>, +} + +struct CoordinatorState { + config: CoordinatorConfig, + registrations: BTreeMap, + schedule: Option, + summaries: BTreeMap, +} + +pub async fn run(config: CoordinatorConfig) -> Result<()> { + fs::create_dir_all(&config.output_dir) + .with_context(|| format!("failed to create {}", config.output_dir.display()))?; + + let state = AppState { + inner: Arc::new(Mutex::new(CoordinatorState { + config: config.clone(), + registrations: BTreeMap::new(), + schedule: None, + summaries: BTreeMap::new(), + })), + }; + + let app = Router::new() + .route("/register", post(register_driver)) + .route("/schedule", get(get_schedule)) + .route("/summary", post(submit_summary)) + .with_state(state); + + let listener = tokio::net::TcpListener::bind(config.listen) + .await + .with_context(|| format!("failed to bind {}", config.listen))?; + log::info!("coordinator listening on {}", config.listen); + axum::serve(listener, app).await.context("coordinator server exited") +} + +async fn 
register_driver( + State(state): State, + Json(request): Json, +) -> Json { + let mut inner = state.inner.lock(); + inner.registrations.insert(request.driver_id.clone(), request); + maybe_create_schedule(&mut inner); + Json(RegisterDriverResponse { accepted: true }) +} + +async fn get_schedule(State(state): State) -> Json { + let inner = state.inner.lock(); + Json(ScheduleResponse { + ready: inner.schedule.is_some(), + schedule: inner.schedule.clone(), + }) +} + +async fn submit_summary( + State(state): State, + Json(request): Json, +) -> Result, axum::http::StatusCode> { + let aggregate = { + let mut inner = state.inner.lock(); + inner + .summaries + .insert(request.summary.driver_id.clone(), request.summary.clone()); + if inner.summaries.len() == inner.config.expected_drivers { + let summaries: Vec<_> = inner.summaries.values().cloned().collect(); + let aggregate = aggregate_summaries(inner.config.run_id.clone(), &summaries); + if let Err(err) = write_aggregate(&inner.config.output_dir, &aggregate) { + log::error!("failed to write aggregate summary: {err:#}"); + return Err(axum::http::StatusCode::INTERNAL_SERVER_ERROR); + } + aggregate + } else { + aggregate_summaries( + inner.config.run_id.clone(), + &inner.summaries.values().cloned().collect::>(), + ) + } + }; + Ok(Json(aggregate)) +} + +fn maybe_create_schedule(inner: &mut CoordinatorState) { + if inner.schedule.is_some() || inner.registrations.len() < inner.config.expected_drivers { + return; + } + let warmup_start_ms = now_millis() + 2_000; + let measure_start_ms = warmup_start_ms + (inner.config.warmup_secs * 1_000); + let measure_end_ms = measure_start_ms + (inner.config.measure_secs * 1_000); + inner.schedule = Some(RunSchedule { + run_id: inner.config.run_id.clone(), + warmup_start_ms, + measure_start_ms, + measure_end_ms, + stop_ms: measure_end_ms, + }); + log::info!( + "all {} driver(s) registered; schedule ready for run {}", + inner.config.expected_drivers, + inner.config.run_id + ); +} + +fn 
write_aggregate(output_dir: &Path, aggregate: &AggregateSummary) -> Result<()> { + let run_dir = output_dir.join(&aggregate.run_id); + fs::create_dir_all(&run_dir).with_context(|| format!("failed to create {}", run_dir.display()))?; + write_json(&run_dir.join("summary.json"), aggregate) +} diff --git a/tools/tpcc-runner/src/driver.rs b/tools/tpcc-runner/src/driver.rs new file mode 100644 index 00000000000..22b6faeb0d1 --- /dev/null +++ b/tools/tpcc-runner/src/driver.rs @@ -0,0 +1,650 @@ +use anyhow::{anyhow, bail, Context, Result}; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use std::fs; +use std::path::{Path, PathBuf}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio::task::JoinSet; + +use crate::client::{expect_ok, ModuleClient}; +use crate::config::{default_run_id, DriverConfig}; +use crate::module_bindings::*; +use crate::protocol::{ + RegisterDriverRequest, RegisterDriverResponse, RunSchedule, ScheduleResponse, SubmitSummaryRequest, +}; +use crate::summary::{write_json, DriverSummary, DriverSummaryMeta, SharedMetrics, TransactionKind, TransactionRecord}; +use crate::tpcc::*; + +struct TerminalRuntime { + config: DriverConfig, + metrics: SharedMetrics, + abort: Arc, + request_ids: Arc, + schedule: RunSchedule, + run_constants: RunConstants, + assignment: TerminalAssignment, + seed: u64, +} + +struct TransactionContext<'a> { + client: &'a ModuleClient, + config: &'a DriverConfig, + run_id: &'a str, + driver_id: &'a str, + assignment: &'a TerminalAssignment, + constants: &'a RunConstants, + request_ids: &'a AtomicU64, +} + +pub async fn run(config: DriverConfig) -> Result<()> { + let schedule = resolve_schedule(&config).await?; + let run_id = schedule.run_id.clone(); + let output_dir = resolve_output_dir(&config, &run_id); + fs::create_dir_all(&output_dir).with_context(|| format!("failed to create {}", output_dir.display()))?; + + let events_path = output_dir.join("txn_events.ndjson"); + let 
summary_path = output_dir.join("summary.json"); + let metrics = SharedMetrics::create(&run_id, &config.driver_id, &events_path)?; + + let run_constants = { + let mut rng = StdRng::seed_from_u64(schedule.measure_start_ms ^ u64::from(config.terminal_start)); + generate_run_constants(&mut rng) + }; + + let abort = Arc::new(AtomicBool::new(false)); + let request_ids = Arc::new(AtomicU64::new(1)); + let mut tasks = JoinSet::new(); + + for offset in 0..config.terminals { + let terminal_id = config.terminal_start + offset; + let assignment = assign_terminal(terminal_id, config.warehouse_count).ok_or_else(|| { + anyhow!( + "terminal {} exceeds warehouse capacity {}", + terminal_id, + config.warehouse_count + ) + })?; + let terminal_seed = schedule.measure_start_ms ^ ((terminal_id as u64) << 32) ^ 0xabcdu64; + let terminal_config = config.clone(); + let terminal_metrics = metrics.clone(); + let terminal_abort = abort.clone(); + let terminal_constants = run_constants.clone(); + let terminal_schedule = schedule.clone(); + let terminal_request_ids = request_ids.clone(); + let runtime = TerminalRuntime { + config: terminal_config, + metrics: terminal_metrics, + abort: terminal_abort, + request_ids: terminal_request_ids, + schedule: terminal_schedule, + run_constants: terminal_constants, + assignment, + seed: terminal_seed, + }; + tasks.spawn(run_terminal(runtime)); + } + + let mut first_error: Option = None; + while let Some(result) = tasks.join_next().await { + match result { + Ok(Ok(())) => {} + Ok(Err(err)) => { + abort.store(true, Ordering::Relaxed); + if first_error.is_none() { + first_error = Some(err); + } + } + Err(err) => { + abort.store(true, Ordering::Relaxed); + if first_error.is_none() { + first_error = Some(anyhow!("terminal task failed: {}", err)); + } + } + } + } + if let Some(err) = first_error { + return Err(err); + } + + harvest_delivery_completions(&config, &schedule, &metrics).await?; + + let summary = metrics.finalize(DriverSummaryMeta { + run_id: 
run_id.clone(), + driver_id: config.driver_id.clone(), + uri: config.connection.uri.clone(), + database: config.connection.database.clone(), + terminal_start: config.terminal_start, + terminals: config.terminals, + warehouse_count: config.warehouse_count, + warmup_secs: config.warmup_secs, + measure_secs: config.measure_secs, + measure_start_ms: schedule.measure_start_ms, + measure_end_ms: schedule.measure_end_ms, + })?; + write_json(&summary_path, &summary)?; + print_summary(&summary, &summary_path, &events_path); + + if let Some(coordinator_url) = &config.coordinator_url { + submit_summary(coordinator_url, summary).await?; + } + + Ok(()) +} + +async fn run_terminal(runtime: TerminalRuntime) -> Result<()> { + let TerminalRuntime { + config, + metrics, + abort, + request_ids, + schedule, + run_constants, + assignment, + seed, + } = runtime; + let client = ModuleClient::connect(&config.connection).await?; + sleep_until_ms(schedule.warmup_start_ms).await; + + let mut rng = StdRng::seed_from_u64(seed); + while !abort.load(Ordering::Relaxed) { + if crate::summary::now_millis() >= schedule.stop_ms { + break; + } + + let kind = choose_transaction(&mut rng); + let started_ms = crate::summary::now_millis(); + let context = TransactionContext { + client: &client, + config: &config, + run_id: &schedule.run_id, + driver_id: &config.driver_id, + assignment: &assignment, + constants: &run_constants, + request_ids: &request_ids, + }; + let event = execute_transaction(&context, kind, &mut rng, started_ms).await; + + match event { + Ok(record) => { + if record.timestamp_ms >= schedule.measure_start_ms && record.timestamp_ms < schedule.measure_end_ms { + metrics.record(record)?; + } + } + Err(err) => { + abort.store(true, Ordering::Relaxed); + client.shutdown().await; + return Err(err); + } + } + + let delay = keying_time(kind, config.keying_time_scale) + think_time(kind, config.think_time_scale, &mut rng); + if !delay.is_zero() && crate::summary::now_millis() < schedule.stop_ms { 
+ tokio::time::sleep(delay).await; + } + } + + client.shutdown().await; + Ok(()) +} + +async fn execute_transaction( + context: &TransactionContext<'_>, + kind: TransactionKind, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + match kind { + TransactionKind::NewOrder => { + execute_new_order( + context.client, + context.config.warehouse_count, + context.assignment, + context.constants, + rng, + started_ms, + ) + .await + } + TransactionKind::Payment => { + execute_payment( + context.client, + context.config.warehouse_count, + context.assignment, + context.constants, + rng, + started_ms, + ) + .await + } + TransactionKind::OrderStatus => { + execute_order_status(context.client, context.assignment, context.constants, rng, started_ms).await + } + TransactionKind::Delivery => { + execute_delivery( + context.client, + context.run_id, + context.driver_id, + context.assignment, + context.request_ids, + rng, + started_ms, + ) + .await + } + TransactionKind::StockLevel => execute_stock_level(context.client, context.assignment, rng, started_ms).await, + } +} + +async fn execute_new_order( + client: &ModuleClient, + warehouse_count: u16, + assignment: &TerminalAssignment, + constants: &RunConstants, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let customer_id = customer_id(rng, constants); + let line_count = rng.random_range(5..=15); + let invalid_line = rng.random_bool(0.01); + let mut order_lines = Vec::with_capacity(line_count); + let mut remote_order_line_count = 0u32; + for idx in 0..line_count { + let remote = warehouse_count > 1 && rng.random_bool(0.01); + let supply_w_id = if remote { + remote_order_line_count += 1; + let mut remote = assignment.warehouse_id; + while remote == assignment.warehouse_id { + remote = rng.random_range(1..=warehouse_count); + } + remote + } else { + assignment.warehouse_id + }; + let item_id = if invalid_line && idx + 1 == line_count { + ITEMS + 1 + } else { + item_id(rng, constants) + }; + 
order_lines.push(NewOrderLineInput { + item_id, + supply_w_id, + quantity: rng.random_range(1..=10), + }); + } + + let result = client + .new_order( + assignment.warehouse_id, + assignment.district_id, + customer_id, + order_lines, + ) + .await?; + let finished_ms = crate::summary::now_millis(); + match result { + Ok(_) => Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::NewOrder, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name: false, + order_line_count: line_count as u32, + remote_order_line_count, + detail: None, + }), + Err(message) if invalid_line => Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::NewOrder, + success: false, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: true, + remote: false, + by_last_name: false, + order_line_count: line_count as u32, + remote_order_line_count, + detail: Some(message), + }), + Err(message) => bail!( + "unexpected new_order failure for terminal {}: {}", + assignment.terminal_id, + message + ), + } +} + +async fn execute_payment( + client: &ModuleClient, + warehouse_count: u16, + assignment: &TerminalAssignment, + constants: &RunConstants, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let remote = warehouse_count > 1 && rng.random_bool(0.15); + let c_w_id = if remote { + let mut other = assignment.warehouse_id; + while other == assignment.warehouse_id { + other = rng.random_range(1..=warehouse_count); + } + other + } else { + assignment.warehouse_id + }; + let c_d_id = if remote { + rng.random_range(1..=DISTRICTS_PER_WAREHOUSE) + } else { + assignment.district_id + }; + let by_last_name = rng.random_bool(0.60); + let selector = if by_last_name { + CustomerSelector::ByLastName(customer_last_name(rng, constants)) + } else { + CustomerSelector::ById(customer_id(rng, constants)) + }; + let 
amount_cents = rng.random_range(100..=500_000); + let finished = expect_ok( + "payment", + client + .payment( + assignment.warehouse_id, + assignment.district_id, + c_w_id, + c_d_id, + selector, + amount_cents, + ) + .await, + )?; + let _ = finished; + let finished_ms = crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::Payment, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote, + by_last_name, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +async fn execute_order_status( + client: &ModuleClient, + assignment: &TerminalAssignment, + constants: &RunConstants, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let by_last_name = rng.random_bool(0.60); + let selector = if by_last_name { + CustomerSelector::ByLastName(customer_last_name(rng, constants)) + } else { + CustomerSelector::ById(customer_id(rng, constants)) + }; + let _ = expect_ok( + "order_status", + client + .order_status(assignment.warehouse_id, assignment.district_id, selector) + .await, + )?; + let finished_ms = crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::OrderStatus, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +async fn execute_delivery( + client: &ModuleClient, + run_id: &str, + driver_id: &str, + assignment: &TerminalAssignment, + request_ids: &AtomicU64, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let request_id = request_ids.fetch_add(1, Ordering::Relaxed); + let _ = expect_ok( + "queue_delivery", + client + .queue_delivery( + run_id.to_string(), + driver_id.to_string(), + assignment.terminal_id, + request_id, + assignment.warehouse_id, + 
rng.random_range(1..=10), + ) + .await, + )?; + let finished_ms = crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::Delivery, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name: false, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +async fn execute_stock_level( + client: &ModuleClient, + assignment: &TerminalAssignment, + rng: &mut StdRng, + started_ms: u64, +) -> Result { + let threshold = rng.random_range(10..=20); + let _ = expect_ok( + "stock_level", + client + .stock_level(assignment.warehouse_id, assignment.district_id, threshold) + .await, + )?; + let finished_ms = crate::summary::now_millis(); + Ok(TransactionRecord { + timestamp_ms: finished_ms, + terminal_id: assignment.terminal_id, + kind: TransactionKind::StockLevel, + success: true, + latency_ms: finished_ms.saturating_sub(started_ms), + rollback: false, + remote: false, + by_last_name: false, + order_line_count: 0, + remote_order_line_count: 0, + detail: None, + }) +} + +async fn resolve_schedule(config: &DriverConfig) -> Result { + if let Some(coordinator_url) = &config.coordinator_url { + let client = reqwest::Client::new(); + let register = RegisterDriverRequest { + driver_id: config.driver_id.clone(), + terminal_start: config.terminal_start, + terminals: config.terminals, + warehouse_count: config.warehouse_count, + }; + let response: RegisterDriverResponse = client + .post(format!("{}/register", coordinator_url)) + .json(®ister) + .send() + .await + .context("failed to register driver with coordinator")? + .error_for_status() + .context("coordinator rejected register request")? 
+ .json() + .await + .context("failed to decode register response")?; + if !response.accepted { + bail!("coordinator did not accept driver registration"); + } + loop { + let response: ScheduleResponse = client + .get(format!("{}/schedule", coordinator_url)) + .send() + .await + .context("failed to poll coordinator schedule")? + .error_for_status() + .context("coordinator schedule endpoint returned error")? + .json() + .await + .context("failed to decode schedule response")?; + if let Some(schedule) = response.schedule { + return Ok(schedule); + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + } + + let run_id = config.run_id.clone().unwrap_or_else(default_run_id); + let warmup_start_ms = crate::summary::now_millis() + 2_000; + let measure_start_ms = warmup_start_ms + (config.warmup_secs * 1_000); + let measure_end_ms = measure_start_ms + (config.measure_secs * 1_000); + Ok(RunSchedule { + run_id, + warmup_start_ms, + measure_start_ms, + measure_end_ms, + stop_ms: measure_end_ms, + }) +} + +async fn harvest_delivery_completions( + config: &DriverConfig, + schedule: &RunSchedule, + metrics: &SharedMetrics, +) -> Result<()> { + let expected = metrics.delivery_queued(); + if expected == 0 { + return Ok(()); + } + let client = ModuleClient::connect(&config.connection).await?; + let progress = expect_ok( + "delivery_progress", + client.delivery_progress(schedule.run_id.clone()).await, + )?; + log::info!( + "delivery progress before harvest: pending_jobs={} completed_jobs={}", + progress.pending_jobs, + progress.completed_jobs + ); + let deadline = crate::summary::now_millis() + (config.delivery_wait_secs * 1_000); + let mut seen_for_driver = 0u64; + let mut after_completion_id = 0u64; + + loop { + if seen_for_driver >= expected { + break; + } + let batch = expect_ok( + "fetch_delivery_completions", + client + .fetch_delivery_completions(schedule.run_id.clone(), after_completion_id, 512) + .await, + )?; + if batch.is_empty() { + if 
crate::summary::now_millis() >= deadline { + break; + } + tokio::time::sleep(Duration::from_millis(250)).await; + continue; + } + for row in batch { + after_completion_id = after_completion_id.max(row.completion_id); + if row.driver_id == config.driver_id { + seen_for_driver += 1; + metrics.record_delivery_completion(&row); + } + } + } + + if seen_for_driver < expected { + log::warn!( + "driver {} observed only {} / {} delivery completions before timeout", + config.driver_id, + seen_for_driver, + expected + ); + } + + client.shutdown().await; + Ok(()) +} + +async fn submit_summary(coordinator_url: &str, summary: DriverSummary) -> Result<()> { + let client = reqwest::Client::new(); + client + .post(format!("{}/summary", coordinator_url)) + .json(&SubmitSummaryRequest { summary }) + .send() + .await + .context("failed to submit summary to coordinator")? + .error_for_status() + .context("coordinator rejected summary")?; + Ok(()) +} + +fn resolve_output_dir(config: &DriverConfig, run_id: &str) -> PathBuf { + match &config.output_dir { + Some(path) => path.clone(), + None => PathBuf::from("tpcc-results").join(run_id).join(&config.driver_id), + } +} + +fn print_summary(summary: &DriverSummary, summary_path: &Path, events_path: &Path) { + log::info!("run_id={}", summary.run_id); + log::info!("driver_id={}", summary.driver_id); + log::info!("tpmc_like={:.2}", summary.tpmc_like); + log::info!("total_transactions={}", summary.total_transactions); + for (name, txn) in &summary.transactions { + log::info!( + "{} count={} success={} failure={} p95_ms={} p99_ms={}", + name, + txn.count, + txn.success, + txn.failure, + txn.p95_latency_ms, + txn.p99_latency_ms + ); + } + log::info!( + "delivery queued={} completed={} pending={}", + summary.delivery.queued, + summary.delivery.completed, + summary.delivery.pending + ); + log::info!("summary={}", summary_path.display()); + log::info!("events={}", events_path.display()); +} + +async fn sleep_until_ms(target_ms: u64) { + let now_ms = 
crate::summary::now_millis(); + if target_ms > now_ms { + tokio::time::sleep(Duration::from_millis(target_ms - now_ms)).await; + } +} diff --git a/tools/tpcc-runner/src/loader.rs b/tools/tpcc-runner/src/loader.rs new file mode 100644 index 00000000000..45c0f86c60d --- /dev/null +++ b/tools/tpcc-runner/src/loader.rs @@ -0,0 +1,302 @@ +use anyhow::{Context, Result}; +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use std::time::SystemTime; + +use crate::client::ModuleClient; +use crate::config::LoadConfig; +use crate::module_bindings::*; +use crate::tpcc::*; +use spacetimedb_sdk::Timestamp; + +const WAREHOUSE_YTD_CENTS: i64 = 30_000_000; +const DISTRICT_YTD_CENTS: i64 = 3_000_000; +const CUSTOMER_CREDIT_LIMIT_CENTS: i64 = 5_000_000; +const CUSTOMER_INITIAL_BALANCE_CENTS: i64 = -1_000; +const CUSTOMER_INITIAL_YTD_PAYMENT_CENTS: i64 = 1_000; +const HISTORY_INITIAL_AMOUNT_CENTS: i64 = 1_000; + +pub async fn run(config: LoadConfig) -> Result<()> { + log::info!( + "loading tpcc dataset into {} / {} with {} warehouse(s)", + config.connection.uri, + config.connection.database, + config.warehouses + ); + let client = ModuleClient::connect(&config.connection).await?; + if config.reset { + client.reset_tpcc().await.context("failed to reset tpcc data")?; + } + + let mut rng = StdRng::seed_from_u64(0x5eed_5eed); + let load_c_last = rng.random_range(0..=255); + let base_ts = Timestamp::from(SystemTime::now()); + + load_items(&client, config.batch_size, &mut rng).await?; + load_warehouses_and_districts(&client, config.warehouses, config.batch_size, base_ts, &mut rng).await?; + load_stock(&client, config.warehouses, config.batch_size, &mut rng).await?; + load_customers_history_orders( + &client, + config.warehouses, + config.batch_size, + base_ts, + load_c_last, + &mut rng, + ) + .await?; + + client.shutdown().await; + log::info!("tpcc load finished"); + Ok(()) +} + +async fn load_items(client: &ModuleClient, batch_size: usize, rng: &mut StdRng) -> Result<()> { + 
let mut batch = Vec::with_capacity(batch_size); + for item_id in 1..=ITEMS { + batch.push(Item { + i_id: item_id, + i_im_id: rng.random_range(1..=10_000), + i_name: alpha_numeric_string(rng, 14, 24), + i_price_cents: rng.random_range(100..=10_000), + i_data: maybe_with_original(rng, 26, 50), + }); + if batch.len() >= batch_size { + client.load_items(std::mem::take(&mut batch)).await?; + } + } + if !batch.is_empty() { + client.load_items(batch).await?; + } + Ok(()) +} + +async fn load_warehouses_and_districts( + client: &ModuleClient, + warehouses: u16, + batch_size: usize, + timestamp: Timestamp, + rng: &mut StdRng, +) -> Result<()> { + let mut warehouse_batch = Vec::with_capacity(batch_size); + let mut district_batch = Vec::with_capacity(batch_size); + + for w_id in 1..=warehouses { + warehouse_batch.push(Warehouse { + w_id, + w_name: alpha_string(rng, 6, 10), + w_street_1: alpha_numeric_string(rng, 10, 20), + w_street_2: alpha_numeric_string(rng, 10, 20), + w_city: alpha_string(rng, 10, 20), + w_state: alpha_string(rng, 2, 2), + w_zip: zip_code(rng), + w_tax_bps: rng.random_range(0..=2_000), + w_ytd_cents: WAREHOUSE_YTD_CENTS, + }); + + for d_id in 1..=DISTRICTS_PER_WAREHOUSE { + district_batch.push(District { + d_w_id: w_id, + d_id, + d_name: alpha_string(rng, 6, 10), + d_street_1: alpha_numeric_string(rng, 10, 20), + d_street_2: alpha_numeric_string(rng, 10, 20), + d_city: alpha_string(rng, 10, 20), + d_state: alpha_string(rng, 2, 2), + d_zip: zip_code(rng), + d_tax_bps: rng.random_range(0..=2_000), + d_ytd_cents: DISTRICT_YTD_CENTS, + d_next_o_id: CUSTOMERS_PER_DISTRICT + 1, + }); + } + } + + while !warehouse_batch.is_empty() { + let split_at = warehouse_batch.len().min(batch_size); + let remainder = warehouse_batch.split_off(split_at); + let rows = std::mem::replace(&mut warehouse_batch, remainder); + client.load_warehouses(rows).await?; + } + while !district_batch.is_empty() { + let split_at = district_batch.len().min(batch_size); + let remainder = 
district_batch.split_off(split_at); + let rows = std::mem::replace(&mut district_batch, remainder); + client.load_districts(rows).await?; + } + let _ = timestamp; + Ok(()) +} + +async fn load_stock(client: &ModuleClient, warehouses: u16, batch_size: usize, rng: &mut StdRng) -> Result<()> { + let mut batch = Vec::with_capacity(batch_size); + for w_id in 1..=warehouses { + for item_id in 1..=ITEMS { + batch.push(Stock { + s_w_id: w_id, + s_i_id: item_id, + s_quantity: rng.random_range(10..=100), + s_dist_01: alpha_string(rng, 24, 24), + s_dist_02: alpha_string(rng, 24, 24), + s_dist_03: alpha_string(rng, 24, 24), + s_dist_04: alpha_string(rng, 24, 24), + s_dist_05: alpha_string(rng, 24, 24), + s_dist_06: alpha_string(rng, 24, 24), + s_dist_07: alpha_string(rng, 24, 24), + s_dist_08: alpha_string(rng, 24, 24), + s_dist_09: alpha_string(rng, 24, 24), + s_dist_10: alpha_string(rng, 24, 24), + s_ytd: 0, + s_order_cnt: 0, + s_remote_cnt: 0, + s_data: maybe_with_original(rng, 26, 50), + }); + if batch.len() >= batch_size { + client.load_stocks(std::mem::take(&mut batch)).await?; + } + } + } + if !batch.is_empty() { + client.load_stocks(batch).await?; + } + Ok(()) +} + +async fn load_customers_history_orders( + client: &ModuleClient, + warehouses: u16, + batch_size: usize, + timestamp: Timestamp, + load_c_last: u32, + rng: &mut StdRng, +) -> Result<()> { + let mut customer_batch = Vec::with_capacity(batch_size); + let mut history_batch = Vec::with_capacity(batch_size); + let mut order_batch = Vec::with_capacity(batch_size); + let mut new_order_batch = Vec::with_capacity(batch_size); + let mut order_line_batch = Vec::with_capacity(batch_size); + + for w_id in 1..=warehouses { + for d_id in 1..=DISTRICTS_PER_WAREHOUSE { + let mut permutation: Vec = (1..=CUSTOMERS_PER_DISTRICT).collect(); + permutation.shuffle(rng); + + for c_id in 1..=CUSTOMERS_PER_DISTRICT { + let credit = if rng.random_bool(0.10) { "BC" } else { "GC" }; + let last_name = if c_id <= 1_000 { + 
make_last_name(c_id - 1) + } else { + make_last_name(nurand(rng, 255, 0, 999, load_c_last)) + }; + customer_batch.push(Customer { + c_w_id: w_id, + c_d_id: d_id, + c_id, + c_first: alpha_string(rng, 8, 16), + c_middle: "OE".to_string(), + c_last: last_name, + c_street_1: alpha_numeric_string(rng, 10, 20), + c_street_2: alpha_numeric_string(rng, 10, 20), + c_city: alpha_string(rng, 10, 20), + c_state: alpha_string(rng, 2, 2), + c_zip: zip_code(rng), + c_phone: numeric_string(rng, 16, 16), + c_since: timestamp, + c_credit: credit.to_string(), + c_credit_lim_cents: CUSTOMER_CREDIT_LIMIT_CENTS, + c_discount_bps: rng.random_range(0..=5_000), + c_balance_cents: CUSTOMER_INITIAL_BALANCE_CENTS, + c_ytd_payment_cents: CUSTOMER_INITIAL_YTD_PAYMENT_CENTS, + c_payment_cnt: 1, + c_delivery_cnt: 0, + c_data: alpha_numeric_string(rng, 300, 500), + }); + history_batch.push(History { + history_id: 0, + h_c_id: c_id, + h_c_d_id: d_id, + h_c_w_id: w_id, + h_d_id: d_id, + h_w_id: w_id, + h_date: timestamp, + h_amount_cents: HISTORY_INITIAL_AMOUNT_CENTS, + h_data: alpha_numeric_string(rng, 12, 24), + }); + + if customer_batch.len() >= batch_size { + client.load_customers(std::mem::take(&mut customer_batch)).await?; + } + if history_batch.len() >= batch_size { + client.load_history(std::mem::take(&mut history_batch)).await?; + } + } + + for o_id in 1..=CUSTOMERS_PER_DISTRICT { + let customer_id = permutation[(o_id - 1) as usize]; + let delivered = o_id < NEW_ORDER_START; + let order_line_count = rng.random_range(5..=15) as u8; + order_batch.push(OOrder { + o_w_id: w_id, + o_d_id: d_id, + o_id, + o_c_id: customer_id, + o_entry_d: timestamp, + o_carrier_id: if delivered { + Some(rng.random_range(1..=10)) + } else { + None + }, + o_ol_cnt: order_line_count, + o_all_local: true, + }); + if !delivered { + new_order_batch.push(NewOrder { + no_w_id: w_id, + no_d_id: d_id, + no_o_id: o_id, + }); + } + + for ol_number in 1..=order_line_count { + order_line_batch.push(OrderLine { + ol_w_id: w_id, 
+ ol_d_id: d_id, + ol_o_id: o_id, + ol_number, + ol_i_id: rng.random_range(1..=ITEMS), + ol_supply_w_id: w_id, + ol_delivery_d: if delivered { Some(timestamp) } else { None }, + ol_quantity: 5, + ol_amount_cents: if delivered { 0 } else { rng.random_range(1..=999_999) }, + ol_dist_info: alpha_string(rng, 24, 24), + }); + if order_line_batch.len() >= batch_size { + client.load_order_lines(std::mem::take(&mut order_line_batch)).await?; + } + } + + if order_batch.len() >= batch_size { + client.load_orders(std::mem::take(&mut order_batch)).await?; + } + if new_order_batch.len() >= batch_size { + client.load_new_orders(std::mem::take(&mut new_order_batch)).await?; + } + } + } + } + + if !customer_batch.is_empty() { + client.load_customers(customer_batch).await?; + } + if !history_batch.is_empty() { + client.load_history(history_batch).await?; + } + if !order_batch.is_empty() { + client.load_orders(order_batch).await?; + } + if !new_order_batch.is_empty() { + client.load_new_orders(new_order_batch).await?; + } + if !order_line_batch.is_empty() { + client.load_order_lines(order_line_batch).await?; + } + + Ok(()) +} diff --git a/tools/tpcc-runner/src/main.rs b/tools/tpcc-runner/src/main.rs new file mode 100644 index 00000000000..3fdff566284 --- /dev/null +++ b/tools/tpcc-runner/src/main.rs @@ -0,0 +1,27 @@ +mod client; +mod config; +mod coordinator; +mod driver; +mod loader; +mod module_bindings; +mod protocol; +mod summary; +mod tpcc; + +use clap::Parser; +use config::{Cli, Command, FileConfig}; +use env_logger::Env; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + env_logger::Builder::from_env(Env::default().default_filter_or("tpcc_runner=info")).init(); + + let cli = Cli::parse(); + let file_config = FileConfig::load(cli.config.as_deref())?; + + match cli.command { + Command::Load(args) => loader::run(args.resolve(&file_config)).await, + Command::Driver(args) => driver::run(args.resolve(&file_config)?).await, + Command::Coordinator(args) => 
coordinator::run(args.resolve(&file_config)?).await, + } +} diff --git a/tools/tpcc-runner/src/module_bindings/customer_selector_type.rs b/tools/tpcc-runner/src/module_bindings/customer_selector_type.rs new file mode 100644 index 00000000000..d2b3ae50211 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/customer_selector_type.rs @@ -0,0 +1,17 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub enum CustomerSelector { + ById(u32), + + ByLastName(String), +} + +impl __sdk::InModule for CustomerSelector { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/customer_type.rs b/tools/tpcc-runner/src/module_bindings/customer_type.rs new file mode 100644 index 00000000000..ed4e9d477a8 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/customer_type.rs @@ -0,0 +1,105 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct Customer { + pub c_w_id: u16, + pub c_d_id: u8, + pub c_id: u32, + pub c_first: String, + pub c_middle: String, + pub c_last: String, + pub c_street_1: String, + pub c_street_2: String, + pub c_city: String, + pub c_state: String, + pub c_zip: String, + pub c_phone: String, + pub c_since: __sdk::Timestamp, + pub c_credit: String, + pub c_credit_lim_cents: i64, + pub c_discount_bps: i32, + pub c_balance_cents: i64, + pub c_ytd_payment_cents: i64, + pub c_payment_cnt: u32, + pub c_delivery_cnt: u32, + pub c_data: String, +} + +impl __sdk::InModule for Customer { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `Customer`. +/// +/// Provides typed access to columns for query building. +pub struct CustomerCols { + pub c_w_id: __sdk::__query_builder::Col, + pub c_d_id: __sdk::__query_builder::Col, + pub c_id: __sdk::__query_builder::Col, + pub c_first: __sdk::__query_builder::Col, + pub c_middle: __sdk::__query_builder::Col, + pub c_last: __sdk::__query_builder::Col, + pub c_street_1: __sdk::__query_builder::Col, + pub c_street_2: __sdk::__query_builder::Col, + pub c_city: __sdk::__query_builder::Col, + pub c_state: __sdk::__query_builder::Col, + pub c_zip: __sdk::__query_builder::Col, + pub c_phone: __sdk::__query_builder::Col, + pub c_since: __sdk::__query_builder::Col, + pub c_credit: __sdk::__query_builder::Col, + pub c_credit_lim_cents: __sdk::__query_builder::Col, + pub c_discount_bps: __sdk::__query_builder::Col, + pub c_balance_cents: __sdk::__query_builder::Col, + pub c_ytd_payment_cents: __sdk::__query_builder::Col, + pub c_payment_cnt: __sdk::__query_builder::Col, + pub c_delivery_cnt: __sdk::__query_builder::Col, + pub c_data: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for 
Customer { + type Cols = CustomerCols; + fn cols(table_name: &'static str) -> Self::Cols { + CustomerCols { + c_w_id: __sdk::__query_builder::Col::new(table_name, "c_w_id"), + c_d_id: __sdk::__query_builder::Col::new(table_name, "c_d_id"), + c_id: __sdk::__query_builder::Col::new(table_name, "c_id"), + c_first: __sdk::__query_builder::Col::new(table_name, "c_first"), + c_middle: __sdk::__query_builder::Col::new(table_name, "c_middle"), + c_last: __sdk::__query_builder::Col::new(table_name, "c_last"), + c_street_1: __sdk::__query_builder::Col::new(table_name, "c_street_1"), + c_street_2: __sdk::__query_builder::Col::new(table_name, "c_street_2"), + c_city: __sdk::__query_builder::Col::new(table_name, "c_city"), + c_state: __sdk::__query_builder::Col::new(table_name, "c_state"), + c_zip: __sdk::__query_builder::Col::new(table_name, "c_zip"), + c_phone: __sdk::__query_builder::Col::new(table_name, "c_phone"), + c_since: __sdk::__query_builder::Col::new(table_name, "c_since"), + c_credit: __sdk::__query_builder::Col::new(table_name, "c_credit"), + c_credit_lim_cents: __sdk::__query_builder::Col::new(table_name, "c_credit_lim_cents"), + c_discount_bps: __sdk::__query_builder::Col::new(table_name, "c_discount_bps"), + c_balance_cents: __sdk::__query_builder::Col::new(table_name, "c_balance_cents"), + c_ytd_payment_cents: __sdk::__query_builder::Col::new(table_name, "c_ytd_payment_cents"), + c_payment_cnt: __sdk::__query_builder::Col::new(table_name, "c_payment_cnt"), + c_delivery_cnt: __sdk::__query_builder::Col::new(table_name, "c_delivery_cnt"), + c_data: __sdk::__query_builder::Col::new(table_name, "c_data"), + } + } +} + +/// Indexed column accessor struct for the table `Customer`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct CustomerIxCols {} + +impl __sdk::__query_builder::HasIxCols for Customer { + type IxCols = CustomerIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + CustomerIxCols {} + } +} + +impl __sdk::__query_builder::CanBeLookupTable for Customer {} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_completion_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_completion_type.rs new file mode 100644 index 00000000000..9ede637f78a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_completion_type.rs @@ -0,0 +1,79 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryCompletion { + pub completion_id: u64, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub warehouse_id: u16, + pub carrier_id: u8, + pub queued_at: __sdk::Timestamp, + pub completed_at: __sdk::Timestamp, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +impl __sdk::InModule for DeliveryCompletion { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `DeliveryCompletion`. +/// +/// Provides typed access to columns for query building. 
+pub struct DeliveryCompletionCols { + pub completion_id: __sdk::__query_builder::Col, + pub run_id: __sdk::__query_builder::Col, + pub driver_id: __sdk::__query_builder::Col, + pub terminal_id: __sdk::__query_builder::Col, + pub request_id: __sdk::__query_builder::Col, + pub warehouse_id: __sdk::__query_builder::Col, + pub carrier_id: __sdk::__query_builder::Col, + pub queued_at: __sdk::__query_builder::Col, + pub completed_at: __sdk::__query_builder::Col, + pub skipped_districts: __sdk::__query_builder::Col, + pub processed_districts: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for DeliveryCompletion { + type Cols = DeliveryCompletionCols; + fn cols(table_name: &'static str) -> Self::Cols { + DeliveryCompletionCols { + completion_id: __sdk::__query_builder::Col::new(table_name, "completion_id"), + run_id: __sdk::__query_builder::Col::new(table_name, "run_id"), + driver_id: __sdk::__query_builder::Col::new(table_name, "driver_id"), + terminal_id: __sdk::__query_builder::Col::new(table_name, "terminal_id"), + request_id: __sdk::__query_builder::Col::new(table_name, "request_id"), + warehouse_id: __sdk::__query_builder::Col::new(table_name, "warehouse_id"), + carrier_id: __sdk::__query_builder::Col::new(table_name, "carrier_id"), + queued_at: __sdk::__query_builder::Col::new(table_name, "queued_at"), + completed_at: __sdk::__query_builder::Col::new(table_name, "completed_at"), + skipped_districts: __sdk::__query_builder::Col::new(table_name, "skipped_districts"), + processed_districts: __sdk::__query_builder::Col::new(table_name, "processed_districts"), + } + } +} + +/// Indexed column accessor struct for the table `DeliveryCompletion`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct DeliveryCompletionIxCols { + pub completion_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for DeliveryCompletion { + type IxCols = DeliveryCompletionIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + DeliveryCompletionIxCols { + completion_id: __sdk::__query_builder::IxCol::new(table_name, "completion_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for DeliveryCompletion {} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_completion_view_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_completion_view_type.rs new file mode 100644 index 00000000000..97e7a7c2d60 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_completion_view_type.rs @@ -0,0 +1,25 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryCompletionView { + pub completion_id: u64, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub warehouse_id: u16, + pub carrier_id: u8, + pub queued_at: __sdk::Timestamp, + pub completed_at: __sdk::Timestamp, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +impl __sdk::InModule for DeliveryCompletionView { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_job_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_job_type.rs new file mode 100644 index 00000000000..84c44ea55cc --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_job_type.rs @@ -0,0 +1,84 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryJob { + pub scheduled_id: u64, + pub scheduled_at: __sdk::ScheduleAt, + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub queued_at: __sdk::Timestamp, + pub w_id: u16, + pub carrier_id: u8, + pub next_d_id: u8, + pub skipped_districts: u8, + pub processed_districts: u8, +} + +impl __sdk::InModule for DeliveryJob { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `DeliveryJob`. +/// +/// Provides typed access to columns for query building. +pub struct DeliveryJobCols { + pub scheduled_id: __sdk::__query_builder::Col, + pub scheduled_at: __sdk::__query_builder::Col, + pub run_id: __sdk::__query_builder::Col, + pub driver_id: __sdk::__query_builder::Col, + pub terminal_id: __sdk::__query_builder::Col, + pub request_id: __sdk::__query_builder::Col, + pub queued_at: __sdk::__query_builder::Col, + pub w_id: __sdk::__query_builder::Col, + pub carrier_id: __sdk::__query_builder::Col, + pub next_d_id: __sdk::__query_builder::Col, + pub skipped_districts: __sdk::__query_builder::Col, + pub processed_districts: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for DeliveryJob { + type Cols = DeliveryJobCols; + fn cols(table_name: &'static str) -> Self::Cols { + DeliveryJobCols { + scheduled_id: __sdk::__query_builder::Col::new(table_name, "scheduled_id"), + scheduled_at: __sdk::__query_builder::Col::new(table_name, "scheduled_at"), + run_id: __sdk::__query_builder::Col::new(table_name, "run_id"), + driver_id: __sdk::__query_builder::Col::new(table_name, "driver_id"), + terminal_id: __sdk::__query_builder::Col::new(table_name, "terminal_id"), + request_id: __sdk::__query_builder::Col::new(table_name, "request_id"), + queued_at: 
__sdk::__query_builder::Col::new(table_name, "queued_at"), + w_id: __sdk::__query_builder::Col::new(table_name, "w_id"), + carrier_id: __sdk::__query_builder::Col::new(table_name, "carrier_id"), + next_d_id: __sdk::__query_builder::Col::new(table_name, "next_d_id"), + skipped_districts: __sdk::__query_builder::Col::new(table_name, "skipped_districts"), + processed_districts: __sdk::__query_builder::Col::new(table_name, "processed_districts"), + } + } +} + +/// Indexed column accessor struct for the table `DeliveryJob`. +/// +/// Provides typed access to indexed columns for query building. +pub struct DeliveryJobIxCols { + pub run_id: __sdk::__query_builder::IxCol, + pub scheduled_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for DeliveryJob { + type IxCols = DeliveryJobIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + DeliveryJobIxCols { + run_id: __sdk::__query_builder::IxCol::new(table_name, "run_id"), + scheduled_id: __sdk::__query_builder::IxCol::new(table_name, "scheduled_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for DeliveryJob {} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs b/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs new file mode 100644 index 00000000000..697c941b658 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_progress_procedure.rs @@ -0,0 +1,54 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_progress_type::DeliveryProgress; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct DeliveryProgressArgs { + pub run_id: String, +} + +impl __sdk::InModule for DeliveryProgressArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `delivery_progress`. +/// +/// Implemented for [`super::RemoteProcedures`]. +pub trait delivery_progress { + fn delivery_progress(&self, run_id: String) { + self.delivery_progress_then(run_id, |_, _| {}); + } + + fn delivery_progress_then( + &self, + run_id: String, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl delivery_progress for super::RemoteProcedures { + fn delivery_progress_then( + &self, + run_id: String, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "delivery_progress", + DeliveryProgressArgs { run_id }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_progress_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_progress_type.rs new file mode 100644 index 00000000000..f2c09493a61 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_progress_type.rs @@ -0,0 +1,17 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryProgress { + pub run_id: String, + pub pending_jobs: u64, + pub completed_jobs: u64, +} + +impl __sdk::InModule for DeliveryProgress { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/delivery_queue_ack_type.rs b/tools/tpcc-runner/src/module_bindings/delivery_queue_ack_type.rs new file mode 100644 index 00000000000..1941feda7e0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/delivery_queue_ack_type.rs @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct DeliveryQueueAck { + pub scheduled_id: u64, + pub queued_at: __sdk::Timestamp, + pub warehouse_id: u16, + pub carrier_id: u8, +} + +impl __sdk::InModule for DeliveryQueueAck { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/district_type.rs b/tools/tpcc-runner/src/module_bindings/district_type.rs new file mode 100644 index 00000000000..808fed7ffc0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/district_type.rs @@ -0,0 +1,75 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct District { + pub d_w_id: u16, + pub d_id: u8, + pub d_name: String, + pub d_street_1: String, + pub d_street_2: String, + pub d_city: String, + pub d_state: String, + pub d_zip: String, + pub d_tax_bps: i32, + pub d_ytd_cents: i64, + pub d_next_o_id: u32, +} + +impl __sdk::InModule for District { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `District`. +/// +/// Provides typed access to columns for query building. +pub struct DistrictCols { + pub d_w_id: __sdk::__query_builder::Col, + pub d_id: __sdk::__query_builder::Col, + pub d_name: __sdk::__query_builder::Col, + pub d_street_1: __sdk::__query_builder::Col, + pub d_street_2: __sdk::__query_builder::Col, + pub d_city: __sdk::__query_builder::Col, + pub d_state: __sdk::__query_builder::Col, + pub d_zip: __sdk::__query_builder::Col, + pub d_tax_bps: __sdk::__query_builder::Col, + pub d_ytd_cents: __sdk::__query_builder::Col, + pub d_next_o_id: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for District { + type Cols = DistrictCols; + fn cols(table_name: &'static str) -> Self::Cols { + DistrictCols { + d_w_id: __sdk::__query_builder::Col::new(table_name, "d_w_id"), + d_id: __sdk::__query_builder::Col::new(table_name, "d_id"), + d_name: __sdk::__query_builder::Col::new(table_name, "d_name"), + d_street_1: __sdk::__query_builder::Col::new(table_name, "d_street_1"), + d_street_2: __sdk::__query_builder::Col::new(table_name, "d_street_2"), + d_city: __sdk::__query_builder::Col::new(table_name, "d_city"), + d_state: __sdk::__query_builder::Col::new(table_name, "d_state"), + d_zip: __sdk::__query_builder::Col::new(table_name, "d_zip"), + d_tax_bps: __sdk::__query_builder::Col::new(table_name, "d_tax_bps"), + d_ytd_cents: 
__sdk::__query_builder::Col::new(table_name, "d_ytd_cents"), + d_next_o_id: __sdk::__query_builder::Col::new(table_name, "d_next_o_id"), + } + } +} + +/// Indexed column accessor struct for the table `District`. +/// +/// Provides typed access to indexed columns for query building. +pub struct DistrictIxCols {} + +impl __sdk::__query_builder::HasIxCols for District { + type IxCols = DistrictIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + DistrictIxCols {} + } +} + +impl __sdk::__query_builder::CanBeLookupTable for District {} diff --git a/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs b/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs new file mode 100644 index 00000000000..23760be1e9a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/fetch_delivery_completions_procedure.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_completion_view_type::DeliveryCompletionView; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct FetchDeliveryCompletionsArgs { + pub run_id: String, + pub after_completion_id: u64, + pub limit: u32, +} + +impl __sdk::InModule for FetchDeliveryCompletionsArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `fetch_delivery_completions`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait fetch_delivery_completions { + fn fetch_delivery_completions(&self, run_id: String, after_completion_id: u64, limit: u32) { + self.fetch_delivery_completions_then(run_id, after_completion_id, limit, |_, _| {}); + } + + fn fetch_delivery_completions_then( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, String>, __sdk::InternalError>, + ) + Send + + 'static, + ); +} + +impl fetch_delivery_completions for super::RemoteProcedures { + fn fetch_delivery_completions_then( + &self, + run_id: String, + after_completion_id: u64, + limit: u32, + + __callback: impl FnOnce( + &super::ProcedureEventContext, + Result, String>, __sdk::InternalError>, + ) + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result, String>>( + "fetch_delivery_completions", + FetchDeliveryCompletionsArgs { + run_id, + after_completion_id, + limit, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/history_type.rs b/tools/tpcc-runner/src/module_bindings/history_type.rs new file mode 100644 index 00000000000..fcfea55eb56 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/history_type.rs @@ -0,0 +1,73 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct History { + pub history_id: u64, + pub h_c_id: u32, + pub h_c_d_id: u8, + pub h_c_w_id: u16, + pub h_d_id: u8, + pub h_w_id: u16, + pub h_date: __sdk::Timestamp, + pub h_amount_cents: i64, + pub h_data: String, +} + +impl __sdk::InModule for History { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `History`. 
+/// +/// Provides typed access to columns for query building. +pub struct HistoryCols { + pub history_id: __sdk::__query_builder::Col, + pub h_c_id: __sdk::__query_builder::Col, + pub h_c_d_id: __sdk::__query_builder::Col, + pub h_c_w_id: __sdk::__query_builder::Col, + pub h_d_id: __sdk::__query_builder::Col, + pub h_w_id: __sdk::__query_builder::Col, + pub h_date: __sdk::__query_builder::Col, + pub h_amount_cents: __sdk::__query_builder::Col, + pub h_data: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for History { + type Cols = HistoryCols; + fn cols(table_name: &'static str) -> Self::Cols { + HistoryCols { + history_id: __sdk::__query_builder::Col::new(table_name, "history_id"), + h_c_id: __sdk::__query_builder::Col::new(table_name, "h_c_id"), + h_c_d_id: __sdk::__query_builder::Col::new(table_name, "h_c_d_id"), + h_c_w_id: __sdk::__query_builder::Col::new(table_name, "h_c_w_id"), + h_d_id: __sdk::__query_builder::Col::new(table_name, "h_d_id"), + h_w_id: __sdk::__query_builder::Col::new(table_name, "h_w_id"), + h_date: __sdk::__query_builder::Col::new(table_name, "h_date"), + h_amount_cents: __sdk::__query_builder::Col::new(table_name, "h_amount_cents"), + h_data: __sdk::__query_builder::Col::new(table_name, "h_data"), + } + } +} + +/// Indexed column accessor struct for the table `History`. +/// +/// Provides typed access to indexed columns for query building. 
+pub struct HistoryIxCols { + pub history_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for History { + type IxCols = HistoryIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + HistoryIxCols { + history_id: __sdk::__query_builder::IxCol::new(table_name, "history_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for History {} diff --git a/tools/tpcc-runner/src/module_bindings/item_type.rs b/tools/tpcc-runner/src/module_bindings/item_type.rs new file mode 100644 index 00000000000..d1382e5fc61 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/item_type.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct Item { + pub i_id: u32, + pub i_im_id: u32, + pub i_name: String, + pub i_price_cents: i64, + pub i_data: String, +} + +impl __sdk::InModule for Item { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `Item`. +/// +/// Provides typed access to columns for query building. 
+pub struct ItemCols { + pub i_id: __sdk::__query_builder::Col, + pub i_im_id: __sdk::__query_builder::Col, + pub i_name: __sdk::__query_builder::Col, + pub i_price_cents: __sdk::__query_builder::Col, + pub i_data: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for Item { + type Cols = ItemCols; + fn cols(table_name: &'static str) -> Self::Cols { + ItemCols { + i_id: __sdk::__query_builder::Col::new(table_name, "i_id"), + i_im_id: __sdk::__query_builder::Col::new(table_name, "i_im_id"), + i_name: __sdk::__query_builder::Col::new(table_name, "i_name"), + i_price_cents: __sdk::__query_builder::Col::new(table_name, "i_price_cents"), + i_data: __sdk::__query_builder::Col::new(table_name, "i_data"), + } + } +} + +/// Indexed column accessor struct for the table `Item`. +/// +/// Provides typed access to indexed columns for query building. +pub struct ItemIxCols { + pub i_id: __sdk::__query_builder::IxCol, +} + +impl __sdk::__query_builder::HasIxCols for Item { + type IxCols = ItemIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + ItemIxCols { + i_id: __sdk::__query_builder::IxCol::new(table_name, "i_id"), + } + } +} + +impl __sdk::__query_builder::CanBeLookupTable for Item {} diff --git a/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs new file mode 100644 index 00000000000..68000e9611b --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_customers_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_type::Customer; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadCustomersArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadCustomersArgs) -> Self { + Self::LoadCustomers { rows: args.rows } + } +} + +impl __sdk::InModule for LoadCustomersArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_customers`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_customers { + /// Request that the remote module invoke the reducer `load_customers` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_customers:load_customers_then`] to run a callback after the reducer completes. + fn load_customers(&self, rows: Vec) -> __sdk::Result<()> { + self.load_customers_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_customers` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_customers_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_customers for super::RemoteReducers { + fn load_customers_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadCustomersArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs new file mode 100644 index 00000000000..4d9e6c75cd2 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_districts_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::district_type::District; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadDistrictsArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadDistrictsArgs) -> Self { + Self::LoadDistricts { rows: args.rows } + } +} + +impl __sdk::InModule for LoadDistrictsArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_districts`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_districts { + /// Request that the remote module invoke the reducer `load_districts` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. 
+ /// /// Use [`load_districts:load_districts_then`] to run a callback after the reducer completes. + fn load_districts(&self, rows: Vec) -> __sdk::Result<()> { + self.load_districts_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_districts` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn load_districts_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_districts for super::RemoteReducers { + fn load_districts_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadDistrictsArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs new file mode 100644 index 00000000000..73517bccebe --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_history_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::history_type::History; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadHistoryArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadHistoryArgs) -> Self { + Self::LoadHistory { rows: args.rows } + } +} + +impl __sdk::InModule for LoadHistoryArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_history`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_history { + /// Request that the remote module invoke the reducer `load_history` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_history:load_history_then`] to run a callback after the reducer completes. + fn load_history(&self, rows: Vec) -> __sdk::Result<()> { + self.load_history_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_history` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_history_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_history for super::RemoteReducers { + fn load_history_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadHistoryArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs new file mode 100644 index 00000000000..7cc306270ae --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_items_reducer.rs @@ -0,0 +1,67 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::item_type::Item; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadItemsArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadItemsArgs) -> Self { + Self::LoadItems { rows: args.rows } + } +} + +impl __sdk::InModule for LoadItemsArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_items`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_items { + /// Request that the remote module invoke the reducer `load_items` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. 
+ /// /// Use [`load_items:load_items_then`] to run a callback after the reducer completes. + fn load_items(&self, rows: Vec) -> __sdk::Result<()> { + self.load_items_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_items` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn load_items_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_items for super::RemoteReducers { + fn load_items_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(LoadItemsArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs new file mode 100644 index 00000000000..d79b6550953 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_new_orders_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_type::NewOrder; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadNewOrdersArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadNewOrdersArgs) -> Self { + Self::LoadNewOrders { rows: args.rows } + } +} + +impl __sdk::InModule for LoadNewOrdersArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_new_orders`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_new_orders { + /// Request that the remote module invoke the reducer `load_new_orders` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_new_orders:load_new_orders_then`] to run a callback after the reducer completes. + fn load_new_orders(&self, rows: Vec) -> __sdk::Result<()> { + self.load_new_orders_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_new_orders` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_new_orders_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_new_orders for super::RemoteReducers { + fn load_new_orders_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadNewOrdersArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs new file mode 100644 index 00000000000..189f862f8a5 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_order_lines_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::order_line_type::OrderLine; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadOrderLinesArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadOrderLinesArgs) -> Self { + Self::LoadOrderLines { rows: args.rows } + } +} + +impl __sdk::InModule for LoadOrderLinesArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_order_lines`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_order_lines { + /// Request that the remote module invoke the reducer `load_order_lines` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. 
+ /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_order_lines:load_order_lines_then`] to run a callback after the reducer completes. + fn load_order_lines(&self, rows: Vec) -> __sdk::Result<()> { + self.load_order_lines_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_order_lines` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn load_order_lines_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_order_lines for super::RemoteReducers { + fn load_order_lines_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadOrderLinesArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs new file mode 100644 index 00000000000..a72bb0a9235 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_orders_reducer.rs @@ -0,0 +1,67 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::o_order_type::OOrder; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadOrdersArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadOrdersArgs) -> Self { + Self::LoadOrders { rows: args.rows } + } +} + +impl __sdk::InModule for LoadOrdersArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_orders`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_orders { + /// Request that the remote module invoke the reducer `load_orders` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`load_orders:load_orders_then`] to run a callback after the reducer completes. + fn load_orders(&self, rows: Vec) -> __sdk::Result<()> { + self.load_orders_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_orders` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_orders_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_orders for super::RemoteReducers { + fn load_orders_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(LoadOrdersArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs new file mode 100644 index 00000000000..89d3f80bf7b --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_stocks_reducer.rs @@ -0,0 +1,67 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::stock_type::Stock; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadStocksArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadStocksArgs) -> Self { + Self::LoadStocks { rows: args.rows } + } +} + +impl __sdk::InModule for LoadStocksArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_stocks`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_stocks { + /// Request that the remote module invoke the reducer `load_stocks` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. 
+ /// Use [`load_stocks::load_stocks_then`] to run a callback after the reducer completes. + fn load_stocks(&self, rows: Vec) -> __sdk::Result<()> { + self.load_stocks_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_stocks` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. + fn load_stocks_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_stocks for super::RemoteReducers { + fn load_stocks_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(LoadStocksArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs b/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs new file mode 100644 index 00000000000..b6986a465b0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/load_warehouses_reducer.rs @@ -0,0 +1,68 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::warehouse_type::Warehouse; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct LoadWarehousesArgs { + pub rows: Vec, +} + +impl From for super::Reducer { + fn from(args: LoadWarehousesArgs) -> Self { + Self::LoadWarehouses { rows: args.rows } + } +} + +impl __sdk::InModule for LoadWarehousesArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `load_warehouses`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait load_warehouses { + /// Request that the remote module invoke the reducer `load_warehouses` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// Use [`load_warehouses::load_warehouses_then`] to run a callback after the reducer completes. + fn load_warehouses(&self, rows: Vec) -> __sdk::Result<()> { + self.load_warehouses_then(rows, |_, _| {}) + } + + /// Request that the remote module invoke the reducer `load_warehouses` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn load_warehouses_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl load_warehouses for super::RemoteReducers { + fn load_warehouses_then( + &self, + rows: Vec, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp + .invoke_reducer_with_callback(LoadWarehousesArgs { rows }, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/mod.rs b/tools/tpcc-runner/src/module_bindings/mod.rs new file mode 100644 index 00000000000..4bca7a5d7da --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/mod.rs @@ -0,0 +1,889 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +// This was generated using spacetimedb cli version 2.1.0 (commit 36c416ff4e2b1546db51145c2bcd65070e36b416). 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +pub mod customer_selector_type; +pub mod customer_type; +pub mod delivery_completion_type; +pub mod delivery_completion_view_type; +pub mod delivery_job_type; +pub mod delivery_progress_procedure; +pub mod delivery_progress_type; +pub mod delivery_queue_ack_type; +pub mod district_type; +pub mod fetch_delivery_completions_procedure; +pub mod history_type; +pub mod item_type; +pub mod load_customers_reducer; +pub mod load_districts_reducer; +pub mod load_history_reducer; +pub mod load_items_reducer; +pub mod load_new_orders_reducer; +pub mod load_order_lines_reducer; +pub mod load_orders_reducer; +pub mod load_stocks_reducer; +pub mod load_warehouses_reducer; +pub mod new_order_line_input_type; +pub mod new_order_line_result_type; +pub mod new_order_procedure; +pub mod new_order_result_type; +pub mod new_order_type; +pub mod o_order_type; +pub mod order_line_type; +pub mod order_status_line_result_type; +pub mod order_status_procedure; +pub mod order_status_result_type; +pub mod payment_procedure; +pub mod payment_result_type; +pub mod queue_delivery_procedure; +pub mod reset_tpcc_reducer; +pub mod stock_level_procedure; +pub mod stock_level_result_type; +pub mod stock_type; +pub mod warehouse_type; + +pub use customer_selector_type::CustomerSelector; +pub use customer_type::Customer; +pub use delivery_completion_type::DeliveryCompletion; +pub use delivery_completion_view_type::DeliveryCompletionView; +pub use delivery_job_type::DeliveryJob; +pub use delivery_progress_procedure::delivery_progress; +pub use delivery_progress_type::DeliveryProgress; +pub use delivery_queue_ack_type::DeliveryQueueAck; +pub use district_type::District; +pub use fetch_delivery_completions_procedure::fetch_delivery_completions; +pub use history_type::History; +pub use item_type::Item; +pub use load_customers_reducer::load_customers; +pub use load_districts_reducer::load_districts; 
+pub use load_history_reducer::load_history; +pub use load_items_reducer::load_items; +pub use load_new_orders_reducer::load_new_orders; +pub use load_order_lines_reducer::load_order_lines; +pub use load_orders_reducer::load_orders; +pub use load_stocks_reducer::load_stocks; +pub use load_warehouses_reducer::load_warehouses; +pub use new_order_line_input_type::NewOrderLineInput; +pub use new_order_line_result_type::NewOrderLineResult; +pub use new_order_procedure::new_order; +pub use new_order_result_type::NewOrderResult; +pub use new_order_type::NewOrder; +pub use o_order_type::OOrder; +pub use order_line_type::OrderLine; +pub use order_status_line_result_type::OrderStatusLineResult; +pub use order_status_procedure::order_status; +pub use order_status_result_type::OrderStatusResult; +pub use payment_procedure::payment; +pub use payment_result_type::PaymentResult; +pub use queue_delivery_procedure::queue_delivery; +pub use reset_tpcc_reducer::reset_tpcc; +pub use stock_level_procedure::stock_level; +pub use stock_level_result_type::StockLevelResult; +pub use stock_type::Stock; +pub use warehouse_type::Warehouse; + +#[derive(Clone, PartialEq, Debug)] + +/// One of the reducers defined by this module. +/// +/// Contained within a [`__sdk::ReducerEvent`] in [`EventContext`]s for reducer events +/// to indicate which reducer caused the event. + +pub enum Reducer { + LoadCustomers { rows: Vec }, + LoadDistricts { rows: Vec }, + LoadHistory { rows: Vec }, + LoadItems { rows: Vec }, + LoadNewOrders { rows: Vec }, + LoadOrderLines { rows: Vec }, + LoadOrders { rows: Vec }, + LoadStocks { rows: Vec }, + LoadWarehouses { rows: Vec }, + ResetTpcc, +} + +impl __sdk::InModule for Reducer { + type Module = RemoteModule; +} + +impl __sdk::Reducer for Reducer { + fn reducer_name(&self) -> &'static str { + match self { + Reducer::LoadCustomers { .. } => "load_customers", + Reducer::LoadDistricts { .. } => "load_districts", + Reducer::LoadHistory { .. 
} => "load_history", + Reducer::LoadItems { .. } => "load_items", + Reducer::LoadNewOrders { .. } => "load_new_orders", + Reducer::LoadOrderLines { .. } => "load_order_lines", + Reducer::LoadOrders { .. } => "load_orders", + Reducer::LoadStocks { .. } => "load_stocks", + Reducer::LoadWarehouses { .. } => "load_warehouses", + Reducer::ResetTpcc => "reset_tpcc", + _ => unreachable!(), + } + } + #[allow(clippy::clone_on_copy)] + fn args_bsatn(&self) -> Result, __sats::bsatn::EncodeError> { + match self { + Reducer::LoadCustomers { rows } => { + __sats::bsatn::to_vec(&load_customers_reducer::LoadCustomersArgs { rows: rows.clone() }) + } + Reducer::LoadDistricts { rows } => { + __sats::bsatn::to_vec(&load_districts_reducer::LoadDistrictsArgs { rows: rows.clone() }) + } + Reducer::LoadHistory { rows } => { + __sats::bsatn::to_vec(&load_history_reducer::LoadHistoryArgs { rows: rows.clone() }) + } + Reducer::LoadItems { rows } => { + __sats::bsatn::to_vec(&load_items_reducer::LoadItemsArgs { rows: rows.clone() }) + } + Reducer::LoadNewOrders { rows } => { + __sats::bsatn::to_vec(&load_new_orders_reducer::LoadNewOrdersArgs { rows: rows.clone() }) + } + Reducer::LoadOrderLines { rows } => { + __sats::bsatn::to_vec(&load_order_lines_reducer::LoadOrderLinesArgs { rows: rows.clone() }) + } + Reducer::LoadOrders { rows } => { + __sats::bsatn::to_vec(&load_orders_reducer::LoadOrdersArgs { rows: rows.clone() }) + } + Reducer::LoadStocks { rows } => { + __sats::bsatn::to_vec(&load_stocks_reducer::LoadStocksArgs { rows: rows.clone() }) + } + Reducer::LoadWarehouses { rows } => { + __sats::bsatn::to_vec(&load_warehouses_reducer::LoadWarehousesArgs { rows: rows.clone() }) + } + Reducer::ResetTpcc => __sats::bsatn::to_vec(&reset_tpcc_reducer::ResetTpccArgs {}), + _ => unreachable!(), + } + } +} + +#[derive(Default, Debug)] +#[allow(non_snake_case)] +#[doc(hidden)] +pub struct DbUpdate {} + +impl TryFrom<__ws::v2::TransactionUpdate> for DbUpdate { + type Error = __sdk::Error; + fn 
try_from(raw: __ws::v2::TransactionUpdate) -> Result { + let mut db_update = DbUpdate::default(); + for table_update in __sdk::transaction_update_iter_table_updates(raw) { + match &table_update.table_name[..] { + unknown => { + return Err(__sdk::InternalError::unknown_name("table", unknown, "DatabaseUpdate").into()); + } + } + } + Ok(db_update) + } +} + +impl __sdk::InModule for DbUpdate { + type Module = RemoteModule; +} + +impl __sdk::DbUpdate for DbUpdate { + fn apply_to_client_cache(&self, cache: &mut __sdk::ClientCache) -> AppliedDiff<'_> { + let mut diff = AppliedDiff::default(); + + diff + } + fn parse_initial_rows(raw: __ws::v2::QueryRows) -> __sdk::Result { + let mut db_update = DbUpdate::default(); + for table_rows in raw.tables { + match &table_rows.table[..] { + unknown => { + return Err(__sdk::InternalError::unknown_name("table", unknown, "QueryRows").into()); + } + } + } + Ok(db_update) + } + fn parse_unsubscribe_rows(raw: __ws::v2::QueryRows) -> __sdk::Result { + let mut db_update = DbUpdate::default(); + for table_rows in raw.tables { + match &table_rows.table[..] { + unknown => { + return Err(__sdk::InternalError::unknown_name("table", unknown, "QueryRows").into()); + } + } + } + Ok(db_update) + } +} + +#[derive(Default)] +#[allow(non_snake_case)] +#[doc(hidden)] +pub struct AppliedDiff<'r> { + __unused: std::marker::PhantomData<&'r ()>, +} + +impl __sdk::InModule for AppliedDiff<'_> { + type Module = RemoteModule; +} + +impl<'r> __sdk::AppliedDiff<'r> for AppliedDiff<'r> { + fn invoke_row_callbacks(&self, event: &EventContext, callbacks: &mut __sdk::DbCallbacks) {} +} + +#[doc(hidden)] +#[derive(Debug)] +pub struct RemoteModule; + +impl __sdk::InModule for RemoteModule { + type Module = Self; +} + +/// The `reducers` field of [`EventContext`] and [`DbConnection`], +/// with methods provided by extension traits for each reducer defined by the module. 
+pub struct RemoteReducers { + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for RemoteReducers { + type Module = RemoteModule; +} + +/// The `procedures` field of [`DbConnection`] and other [`DbContext`] types, +/// with methods provided by extension traits for each procedure defined by the module. +pub struct RemoteProcedures { + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for RemoteProcedures { + type Module = RemoteModule; +} + +/// The `db` field of [`EventContext`] and [`DbConnection`], +/// with methods provided by extension traits for each table defined by the module. +pub struct RemoteTables { + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for RemoteTables { + type Module = RemoteModule; +} + +/// A connection to a remote module, including a materialized view of a subset of the database. +/// +/// Connect to a remote module by calling [`DbConnection::builder`] +/// and using the [`__sdk::DbConnectionBuilder`] builder-pattern constructor. +/// +/// You must explicitly advance the connection by calling any one of: +/// +/// - [`DbConnection::frame_tick`]. +#[cfg_attr(not(target_arch = "wasm32"), doc = "- [`DbConnection::run_threaded`].")] +#[cfg_attr(target_arch = "wasm32", doc = "- [`DbConnection::run_background_task`].")] +/// - [`DbConnection::run_async`]. +/// - [`DbConnection::advance_one_message`]. +#[cfg_attr( + not(target_arch = "wasm32"), + doc = "- [`DbConnection::advance_one_message_blocking`]." +)] +/// - [`DbConnection::advance_one_message_async`]. +/// +/// Which of these methods you should call depends on the specific needs of your application, +/// but you must call one of them, or else the connection will never progress. +pub struct DbConnection { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. 
+ pub reducers: RemoteReducers, + #[doc(hidden)] + + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + + imp: __sdk::DbContextImpl, +} + +impl __sdk::InModule for DbConnection { + type Module = RemoteModule; +} + +impl __sdk::DbContext for DbConnection { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl DbConnection { + /// Builder-pattern constructor for a connection to a remote module. + /// + /// See [`__sdk::DbConnectionBuilder`] for required and optional configuration for the new connection. + pub fn builder() -> __sdk::DbConnectionBuilder { + __sdk::DbConnectionBuilder::new() + } + + /// If any WebSocket messages are waiting, process one of them. + /// + /// Returns `true` if a message was processed, or `false` if the queue is empty. + /// Callers should invoke this message in a loop until it returns `false` + /// or for as much time is available to process messages. + /// + /// Returns an error if the connection is disconnected. + /// If the disconnection in question was normal, + /// i.e. 
the result of a call to [`__sdk::DbContext::disconnect`], + /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// + /// This is a low-level primitive exposed for power users who need significant control over scheduling. + /// Most applications should call [`Self::frame_tick`] each frame + /// to fully exhaust the queue whenever time is available. + pub fn advance_one_message(&self) -> __sdk::Result { + self.imp.advance_one_message() + } + + /// Process one WebSocket message, potentially blocking the current thread until one is received. + /// + /// Returns an error if the connection is disconnected. + /// If the disconnection in question was normal, + /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], + /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// + /// This is a low-level primitive exposed for power users who need significant control over scheduling. + /// Most applications should call [`Self::run_threaded`] to spawn a thread + /// which advances the connection automatically. + #[cfg(not(target_arch = "wasm32"))] + pub fn advance_one_message_blocking(&self) -> __sdk::Result<()> { + self.imp.advance_one_message_blocking() + } + + /// Process one WebSocket message, `await`ing until one is received. + /// + /// Returns an error if the connection is disconnected. + /// If the disconnection in question was normal, + /// i.e. the result of a call to [`__sdk::DbContext::disconnect`], + /// the returned error will be downcastable to [`__sdk::DisconnectedError`]. + /// + /// This is a low-level primitive exposed for power users who need significant control over scheduling. + /// Most applications should call [`Self::run_async`] to run an `async` loop + /// which advances the connection when polled. 
+ pub async fn advance_one_message_async(&self) -> __sdk::Result<()> { + self.imp.advance_one_message_async().await + } + + /// Process all WebSocket messages waiting in the queue, + /// then return without `await`ing or blocking the current thread. + pub fn frame_tick(&self) -> __sdk::Result<()> { + self.imp.frame_tick() + } + + /// Spawn a thread which processes WebSocket messages as they are received. + #[cfg(not(target_arch = "wasm32"))] + pub fn run_threaded(&self) -> std::thread::JoinHandle<()> { + self.imp.run_threaded() + } + + /// Spawn a background task which processes WebSocket messages as they are received. + #[cfg(target_arch = "wasm32")] + pub fn run_background_task(&self) { + self.imp.run_background_task() + } + + /// Run an `async` loop which processes WebSocket messages when polled. + pub async fn run_async(&self) -> __sdk::Result<()> { + self.imp.run_async().await + } +} + +impl __sdk::DbConnection for DbConnection { + fn new(imp: __sdk::DbContextImpl) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + imp, + } + } +} + +/// A handle on a subscribed query. +// TODO: Document this better after implementing the new subscription API. +#[derive(Clone)] +pub struct SubscriptionHandle { + imp: __sdk::SubscriptionHandleImpl, +} + +impl __sdk::InModule for SubscriptionHandle { + type Module = RemoteModule; +} + +impl __sdk::SubscriptionHandle for SubscriptionHandle { + fn new(imp: __sdk::SubscriptionHandleImpl) -> Self { + Self { imp } + } + + /// Returns true if this subscription has been terminated due to an unsubscribe call or an error. + fn is_ended(&self) -> bool { + self.imp.is_ended() + } + + /// Returns true if this subscription has been applied and has not yet been unsubscribed. 
+ fn is_active(&self) -> bool { + self.imp.is_active() + } + + /// Unsubscribe from the query controlled by this `SubscriptionHandle`, + /// then run `on_end` when its rows are removed from the client cache. + fn unsubscribe_then(self, on_end: __sdk::OnEndedCallback) -> __sdk::Result<()> { + self.imp.unsubscribe_then(Some(on_end)) + } + + fn unsubscribe(self) -> __sdk::Result<()> { + self.imp.unsubscribe_then(None) + } +} + +/// Alias trait for a [`__sdk::DbContext`] connected to this module, +/// with that trait's associated types bounded to this module's concrete types. +/// +/// Users can use this trait as a boundary on definitions which should accept +/// either a [`DbConnection`] or an [`EventContext`] and operate on either. +pub trait RemoteDbContext: + __sdk::DbContext< + DbView = RemoteTables, + Reducers = RemoteReducers, + SubscriptionBuilder = __sdk::SubscriptionBuilder, +> +{ +} +impl< + Ctx: __sdk::DbContext< + DbView = RemoteTables, + Reducers = RemoteReducers, + SubscriptionBuilder = __sdk::SubscriptionBuilder, + >, + > RemoteDbContext for Ctx +{ +} + +/// An [`__sdk::DbContext`] augmented with a [`__sdk::Event`], +/// passed to [`__sdk::Table::on_insert`], [`__sdk::Table::on_delete`] and [`__sdk::TableWithPrimaryKey::on_update`] callbacks. +pub struct EventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + /// The event which caused these callbacks to run. 
+ pub event: __sdk::Event, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for EventContext { + type Event = __sdk::Event; + fn event(&self) -> &Self::Event { + &self.event + } + fn new(imp: __sdk::DbContextImpl, event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + event, + imp, + } + } +} + +impl __sdk::InModule for EventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for EventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::EventContext for EventContext {} + +/// An [`__sdk::DbContext`] augmented with a [`__sdk::ReducerEvent`], +/// passed to on-reducer callbacks. +pub struct ReducerEventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. 
+ pub procedures: RemoteProcedures, + /// The event which caused these callbacks to run. + pub event: __sdk::ReducerEvent, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for ReducerEventContext { + type Event = __sdk::ReducerEvent; + fn event(&self) -> &Self::Event { + &self.event + } + fn new(imp: __sdk::DbContextImpl, event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + event, + imp, + } + } +} + +impl __sdk::InModule for ReducerEventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for ReducerEventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::ReducerEventContext for ReducerEventContext {} + +/// An [`__sdk::DbContext`] passed to procedure callbacks. +pub struct ProcedureEventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. 
+ pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for ProcedureEventContext { + type Event = (); + fn event(&self) -> &Self::Event { + &() + } + fn new(imp: __sdk::DbContextImpl, _event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + imp, + } + } +} + +impl __sdk::InModule for ProcedureEventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for ProcedureEventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::ProcedureEventContext for ProcedureEventContext {} + +/// An [`__sdk::DbContext`] passed to [`__sdk::SubscriptionBuilder::on_applied`] and [`SubscriptionHandle::unsubscribe_then`] callbacks. +pub struct SubscriptionEventContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. 
+ pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for SubscriptionEventContext { + type Event = (); + fn event(&self) -> &Self::Event { + &() + } + fn new(imp: __sdk::DbContextImpl, _event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + imp, + } + } +} + +impl __sdk::InModule for SubscriptionEventContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for SubscriptionEventContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl __sdk::SubscriptionEventContext for SubscriptionEventContext {} + +/// An [`__sdk::DbContext`] augmented with a [`__sdk::Error`], +/// passed to [`__sdk::DbConnectionBuilder::on_disconnect`], [`__sdk::DbConnectionBuilder::on_connect_error`] and 
[`__sdk::SubscriptionBuilder::on_error`] callbacks. +pub struct ErrorContext { + /// Access to tables defined by the module via extension traits implemented for [`RemoteTables`]. + pub db: RemoteTables, + /// Access to reducers defined by the module via extension traits implemented for [`RemoteReducers`]. + pub reducers: RemoteReducers, + /// Access to procedures defined by the module via extension traits implemented for [`RemoteProcedures`]. + pub procedures: RemoteProcedures, + /// The event which caused these callbacks to run. + pub event: Option<__sdk::Error>, + imp: __sdk::DbContextImpl, +} + +impl __sdk::AbstractEventContext for ErrorContext { + type Event = Option<__sdk::Error>; + fn event(&self) -> &Self::Event { + &self.event + } + fn new(imp: __sdk::DbContextImpl, event: Self::Event) -> Self { + Self { + db: RemoteTables { imp: imp.clone() }, + reducers: RemoteReducers { imp: imp.clone() }, + procedures: RemoteProcedures { imp: imp.clone() }, + event, + imp, + } + } +} + +impl __sdk::InModule for ErrorContext { + type Module = RemoteModule; +} + +impl __sdk::DbContext for ErrorContext { + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + + fn db(&self) -> &Self::DbView { + &self.db + } + fn reducers(&self) -> &Self::Reducers { + &self.reducers + } + fn procedures(&self) -> &Self::Procedures { + &self.procedures + } + + fn is_active(&self) -> bool { + self.imp.is_active() + } + + fn disconnect(&self) -> __sdk::Result<()> { + self.imp.disconnect() + } + + type SubscriptionBuilder = __sdk::SubscriptionBuilder; + + fn subscription_builder(&self) -> Self::SubscriptionBuilder { + __sdk::SubscriptionBuilder::new(&self.imp) + } + + fn try_identity(&self) -> Option<__sdk::Identity> { + self.imp.try_identity() + } + fn connection_id(&self) -> __sdk::ConnectionId { + self.imp.connection_id() + } + fn try_connection_id(&self) -> Option<__sdk::ConnectionId> { + self.imp.try_connection_id() + } +} + +impl 
__sdk::ErrorContext for ErrorContext {} + +impl __sdk::SpacetimeModule for RemoteModule { + type DbConnection = DbConnection; + type EventContext = EventContext; + type ReducerEventContext = ReducerEventContext; + type ProcedureEventContext = ProcedureEventContext; + type SubscriptionEventContext = SubscriptionEventContext; + type ErrorContext = ErrorContext; + type Reducer = Reducer; + type DbView = RemoteTables; + type Reducers = RemoteReducers; + type Procedures = RemoteProcedures; + type DbUpdate = DbUpdate; + type AppliedDiff<'r> = AppliedDiff<'r>; + type SubscriptionHandle = SubscriptionHandle; + type QueryBuilder = __sdk::QueryBuilder; + + fn register_tables(client_cache: &mut __sdk::ClientCache) {} + const ALL_TABLE_NAMES: &'static [&'static str] = &[]; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_line_input_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_line_input_type.rs new file mode 100644 index 00000000000..4a1ebd2ee02 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_line_input_type.rs @@ -0,0 +1,17 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrderLineInput { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, +} + +impl __sdk::InModule for NewOrderLineInput { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_line_result_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_line_result_type.rs new file mode 100644 index 00000000000..064e19222ca --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_line_result_type.rs @@ -0,0 +1,22 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. 
EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrderLineResult { + pub item_id: u32, + pub item_name: String, + pub supply_w_id: u16, + pub quantity: u32, + pub stock_quantity: i32, + pub item_price_cents: i64, + pub amount_cents: i64, + pub brand_generic: String, +} + +impl __sdk::InModule for NewOrderLineResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs b/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs new file mode 100644 index 00000000000..03bf82d7d47 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_procedure.rs @@ -0,0 +1,69 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_line_input_type::NewOrderLineInput; +use super::new_order_result_type::NewOrderResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct NewOrderArgs { + pub w_id: u16, + pub d_id: u8, + pub c_id: u32, + pub order_lines: Vec, +} + +impl __sdk::InModule for NewOrderArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `new_order`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait new_order { + fn new_order(&self, w_id: u16, d_id: u8, c_id: u32, order_lines: Vec) { + self.new_order_then(w_id, d_id, c_id, order_lines, |_, _| {}); + } + + fn new_order_then( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl new_order for super::RemoteProcedures { + fn new_order_then( + &self, + w_id: u16, + d_id: u8, + c_id: u32, + order_lines: Vec, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "new_order", + NewOrderArgs { + w_id, + d_id, + c_id, + order_lines, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_result_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_result_type.rs new file mode 100644 index 00000000000..0ffb88612f0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_result_type.rs @@ -0,0 +1,26 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::new_order_line_result_type::NewOrderLineResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrderResult { + pub warehouse_tax_bps: i32, + pub district_tax_bps: i32, + pub customer_discount_bps: i32, + pub customer_last: String, + pub customer_credit: String, + pub order_id: u32, + pub entry_d: __sdk::Timestamp, + pub total_amount_cents: i64, + pub all_local: bool, + pub lines: Vec, +} + +impl __sdk::InModule for NewOrderResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/new_order_type.rs b/tools/tpcc-runner/src/module_bindings/new_order_type.rs new file mode 100644 index 00000000000..c89ecf0ffa2 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/new_order_type.rs @@ -0,0 +1,51 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct NewOrder { + pub no_w_id: u16, + pub no_d_id: u8, + pub no_o_id: u32, +} + +impl __sdk::InModule for NewOrder { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `NewOrder`. +/// +/// Provides typed access to columns for query building. 
+pub struct NewOrderCols { + pub no_w_id: __sdk::__query_builder::Col, + pub no_d_id: __sdk::__query_builder::Col, + pub no_o_id: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for NewOrder { + type Cols = NewOrderCols; + fn cols(table_name: &'static str) -> Self::Cols { + NewOrderCols { + no_w_id: __sdk::__query_builder::Col::new(table_name, "no_w_id"), + no_d_id: __sdk::__query_builder::Col::new(table_name, "no_d_id"), + no_o_id: __sdk::__query_builder::Col::new(table_name, "no_o_id"), + } + } +} + +/// Indexed column accessor struct for the table `NewOrder`. +/// +/// Provides typed access to indexed columns for query building. +pub struct NewOrderIxCols {} + +impl __sdk::__query_builder::HasIxCols for NewOrder { + type IxCols = NewOrderIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + NewOrderIxCols {} + } +} + +impl __sdk::__query_builder::CanBeLookupTable for NewOrder {} diff --git a/tools/tpcc-runner/src/module_bindings/o_order_type.rs b/tools/tpcc-runner/src/module_bindings/o_order_type.rs new file mode 100644 index 00000000000..9f9e6b9d6b6 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/o_order_type.rs @@ -0,0 +1,66 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OOrder { + pub o_w_id: u16, + pub o_d_id: u8, + pub o_id: u32, + pub o_c_id: u32, + pub o_entry_d: __sdk::Timestamp, + pub o_carrier_id: Option, + pub o_ol_cnt: u8, + pub o_all_local: bool, +} + +impl __sdk::InModule for OOrder { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `OOrder`. +/// +/// Provides typed access to columns for query building. 
+pub struct OOrderCols { + pub o_w_id: __sdk::__query_builder::Col, + pub o_d_id: __sdk::__query_builder::Col, + pub o_id: __sdk::__query_builder::Col, + pub o_c_id: __sdk::__query_builder::Col, + pub o_entry_d: __sdk::__query_builder::Col, + pub o_carrier_id: __sdk::__query_builder::Col>, + pub o_ol_cnt: __sdk::__query_builder::Col, + pub o_all_local: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for OOrder { + type Cols = OOrderCols; + fn cols(table_name: &'static str) -> Self::Cols { + OOrderCols { + o_w_id: __sdk::__query_builder::Col::new(table_name, "o_w_id"), + o_d_id: __sdk::__query_builder::Col::new(table_name, "o_d_id"), + o_id: __sdk::__query_builder::Col::new(table_name, "o_id"), + o_c_id: __sdk::__query_builder::Col::new(table_name, "o_c_id"), + o_entry_d: __sdk::__query_builder::Col::new(table_name, "o_entry_d"), + o_carrier_id: __sdk::__query_builder::Col::new(table_name, "o_carrier_id"), + o_ol_cnt: __sdk::__query_builder::Col::new(table_name, "o_ol_cnt"), + o_all_local: __sdk::__query_builder::Col::new(table_name, "o_all_local"), + } + } +} + +/// Indexed column accessor struct for the table `OOrder`. +/// +/// Provides typed access to indexed columns for query building. +pub struct OOrderIxCols {} + +impl __sdk::__query_builder::HasIxCols for OOrder { + type IxCols = OOrderIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + OOrderIxCols {} + } +} + +impl __sdk::__query_builder::CanBeLookupTable for OOrder {} diff --git a/tools/tpcc-runner/src/module_bindings/order_line_type.rs b/tools/tpcc-runner/src/module_bindings/order_line_type.rs new file mode 100644 index 00000000000..2531e588647 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_line_type.rs @@ -0,0 +1,72 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OrderLine { + pub ol_w_id: u16, + pub ol_d_id: u8, + pub ol_o_id: u32, + pub ol_number: u8, + pub ol_i_id: u32, + pub ol_supply_w_id: u16, + pub ol_delivery_d: Option<__sdk::Timestamp>, + pub ol_quantity: u32, + pub ol_amount_cents: i64, + pub ol_dist_info: String, +} + +impl __sdk::InModule for OrderLine { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `OrderLine`. +/// +/// Provides typed access to columns for query building. +pub struct OrderLineCols { + pub ol_w_id: __sdk::__query_builder::Col, + pub ol_d_id: __sdk::__query_builder::Col, + pub ol_o_id: __sdk::__query_builder::Col, + pub ol_number: __sdk::__query_builder::Col, + pub ol_i_id: __sdk::__query_builder::Col, + pub ol_supply_w_id: __sdk::__query_builder::Col, + pub ol_delivery_d: __sdk::__query_builder::Col>, + pub ol_quantity: __sdk::__query_builder::Col, + pub ol_amount_cents: __sdk::__query_builder::Col, + pub ol_dist_info: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for OrderLine { + type Cols = OrderLineCols; + fn cols(table_name: &'static str) -> Self::Cols { + OrderLineCols { + ol_w_id: __sdk::__query_builder::Col::new(table_name, "ol_w_id"), + ol_d_id: __sdk::__query_builder::Col::new(table_name, "ol_d_id"), + ol_o_id: __sdk::__query_builder::Col::new(table_name, "ol_o_id"), + ol_number: __sdk::__query_builder::Col::new(table_name, "ol_number"), + ol_i_id: __sdk::__query_builder::Col::new(table_name, "ol_i_id"), + ol_supply_w_id: __sdk::__query_builder::Col::new(table_name, "ol_supply_w_id"), + ol_delivery_d: __sdk::__query_builder::Col::new(table_name, "ol_delivery_d"), + ol_quantity: __sdk::__query_builder::Col::new(table_name, "ol_quantity"), + ol_amount_cents: __sdk::__query_builder::Col::new(table_name, 
"ol_amount_cents"), + ol_dist_info: __sdk::__query_builder::Col::new(table_name, "ol_dist_info"), + } + } +} + +/// Indexed column accessor struct for the table `OrderLine`. +/// +/// Provides typed access to indexed columns for query building. +pub struct OrderLineIxCols {} + +impl __sdk::__query_builder::HasIxCols for OrderLine { + type IxCols = OrderLineIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + OrderLineIxCols {} + } +} + +impl __sdk::__query_builder::CanBeLookupTable for OrderLine {} diff --git a/tools/tpcc-runner/src/module_bindings/order_status_line_result_type.rs b/tools/tpcc-runner/src/module_bindings/order_status_line_result_type.rs new file mode 100644 index 00000000000..a4bd806a60a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_status_line_result_type.rs @@ -0,0 +1,19 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OrderStatusLineResult { + pub item_id: u32, + pub supply_w_id: u16, + pub quantity: u32, + pub amount_cents: i64, + pub delivery_d: Option<__sdk::Timestamp>, +} + +impl __sdk::InModule for OrderStatusLineResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs b/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs new file mode 100644 index 00000000000..d0e44d9c7f4 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_status_procedure.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_selector_type::CustomerSelector; +use super::order_status_result_type::OrderStatusResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct OrderStatusArgs { + pub w_id: u16, + pub d_id: u8, + pub customer: CustomerSelector, +} + +impl __sdk::InModule for OrderStatusArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `order_status`. +/// +/// Implemented for [`super::RemoteProcedures`]. +pub trait order_status { + fn order_status(&self, w_id: u16, d_id: u8, customer: CustomerSelector) { + self.order_status_then(w_id, d_id, customer, |_, _| {}); + } + + fn order_status_then( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl order_status for super::RemoteProcedures { + fn order_status_then( + &self, + w_id: u16, + d_id: u8, + customer: CustomerSelector, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "order_status", + OrderStatusArgs { w_id, d_id, customer }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/order_status_result_type.rs b/tools/tpcc-runner/src/module_bindings/order_status_result_type.rs new file mode 100644 index 00000000000..0c5a387e7a0 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/order_status_result_type.rs @@ -0,0 +1,25 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::order_status_line_result_type::OrderStatusLineResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct OrderStatusResult { + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub order_id: Option, + pub order_entry_d: Option<__sdk::Timestamp>, + pub carrier_id: Option, + pub lines: Vec, +} + +impl __sdk::InModule for OrderStatusResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/payment_procedure.rs b/tools/tpcc-runner/src/module_bindings/payment_procedure.rs new file mode 100644 index 00000000000..1557b3e07fe --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/payment_procedure.rs @@ -0,0 +1,85 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::customer_selector_type::CustomerSelector; +use super::payment_result_type::PaymentResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct PaymentArgs { + pub w_id: u16, + pub d_id: u8, + pub c_w_id: u16, + pub c_d_id: u8, + pub customer: CustomerSelector, + pub payment_amount_cents: i64, +} + +impl __sdk::InModule for PaymentArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `payment`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait payment { + fn payment( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + ) { + self.payment_then(w_id, d_id, c_w_id, c_d_id, customer, payment_amount_cents, |_, _| {}); + } + + fn payment_then( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl payment for super::RemoteProcedures { + fn payment_then( + &self, + w_id: u16, + d_id: u8, + c_w_id: u16, + c_d_id: u8, + customer: CustomerSelector, + payment_amount_cents: i64, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "payment", + PaymentArgs { + w_id, + d_id, + c_w_id, + c_d_id, + customer, + payment_amount_cents, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/payment_result_type.rs b/tools/tpcc-runner/src/module_bindings/payment_result_type.rs new file mode 100644 index 00000000000..0b33b3ff60e --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/payment_result_type.rs @@ -0,0 +1,25 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct PaymentResult { + pub warehouse_name: String, + pub district_name: String, + pub customer_id: u32, + pub customer_first: String, + pub customer_middle: String, + pub customer_last: String, + pub customer_balance_cents: i64, + pub customer_credit: String, + pub customer_discount_bps: i32, + pub payment_amount_cents: i64, + pub customer_data: Option, +} + +impl __sdk::InModule for PaymentResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs b/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs new file mode 100644 index 00000000000..f42b2b1883a --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/queue_delivery_procedure.rs @@ -0,0 +1,84 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::delivery_queue_ack_type::DeliveryQueueAck; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct QueueDeliveryArgs { + pub run_id: String, + pub driver_id: String, + pub terminal_id: u32, + pub request_id: u64, + pub w_id: u16, + pub carrier_id: u8, +} + +impl __sdk::InModule for QueueDeliveryArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `queue_delivery`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait queue_delivery { + fn queue_delivery( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + ) { + self.queue_delivery_then(run_id, driver_id, terminal_id, request_id, w_id, carrier_id, |_, _| {}); + } + + fn queue_delivery_then( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl queue_delivery for super::RemoteProcedures { + fn queue_delivery_then( + &self, + run_id: String, + driver_id: String, + terminal_id: u32, + request_id: u64, + w_id: u16, + carrier_id: u8, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "queue_delivery", + QueueDeliveryArgs { + run_id, + driver_id, + terminal_id, + request_id, + w_id, + carrier_id, + }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs b/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs new file mode 100644 index 00000000000..da9424c94c4 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/reset_tpcc_reducer.rs @@ -0,0 +1,61 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub(super) struct ResetTpccArgs {} + +impl From for super::Reducer { + fn from(args: ResetTpccArgs) -> Self { + Self::ResetTpcc + } +} + +impl __sdk::InModule for ResetTpccArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the reducer `reset_tpcc`. +/// +/// Implemented for [`super::RemoteReducers`]. +pub trait reset_tpcc { + /// Request that the remote module invoke the reducer `reset_tpcc` to run as soon as possible. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and this method provides no way to listen for its completion status. + /// /// Use [`reset_tpcc:reset_tpcc_then`] to run a callback after the reducer completes. + fn reset_tpcc(&self) -> __sdk::Result<()> { + self.reset_tpcc_then(|_, _| {}) + } + + /// Request that the remote module invoke the reducer `reset_tpcc` to run as soon as possible, + /// registering `callback` to run when we are notified that the reducer completed. + /// + /// This method returns immediately, and errors only if we are unable to send the request. + /// The reducer will run asynchronously in the future, + /// and its status can be observed with the `callback`. 
+ fn reset_tpcc_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()>; +} + +impl reset_tpcc for super::RemoteReducers { + fn reset_tpcc_then( + &self, + + callback: impl FnOnce(&super::ReducerEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) -> __sdk::Result<()> { + self.imp.invoke_reducer_with_callback(ResetTpccArgs {}, callback) + } +} diff --git a/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs b/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs new file mode 100644 index 00000000000..cecefcbbf0e --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/stock_level_procedure.rs @@ -0,0 +1,60 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +use super::stock_level_result_type::StockLevelResult; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +struct StockLevelArgs { + pub w_id: u16, + pub d_id: u8, + pub threshold: i32, +} + +impl __sdk::InModule for StockLevelArgs { + type Module = super::RemoteModule; +} + +#[allow(non_camel_case_types)] +/// Extension trait for access to the procedure `stock_level`. +/// +/// Implemented for [`super::RemoteProcedures`]. 
+pub trait stock_level { + fn stock_level(&self, w_id: u16, d_id: u8, threshold: i32) { + self.stock_level_then(w_id, d_id, threshold, |_, _| {}); + } + + fn stock_level_then( + &self, + w_id: u16, + d_id: u8, + threshold: i32, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ); +} + +impl stock_level for super::RemoteProcedures { + fn stock_level_then( + &self, + w_id: u16, + d_id: u8, + threshold: i32, + + __callback: impl FnOnce(&super::ProcedureEventContext, Result, __sdk::InternalError>) + + Send + + 'static, + ) { + self.imp + .invoke_procedure_with_callback::<_, Result>( + "stock_level", + StockLevelArgs { w_id, d_id, threshold }, + __callback, + ); + } +} diff --git a/tools/tpcc-runner/src/module_bindings/stock_level_result_type.rs b/tools/tpcc-runner/src/module_bindings/stock_level_result_type.rs new file mode 100644 index 00000000000..426e4853363 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/stock_level_result_type.rs @@ -0,0 +1,18 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct StockLevelResult { + pub warehouse_id: u16, + pub district_id: u8, + pub threshold: i32, + pub low_stock_count: u32, +} + +impl __sdk::InModule for StockLevelResult { + type Module = super::RemoteModule; +} diff --git a/tools/tpcc-runner/src/module_bindings/stock_type.rs b/tools/tpcc-runner/src/module_bindings/stock_type.rs new file mode 100644 index 00000000000..e3780673370 --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/stock_type.rs @@ -0,0 +1,93 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. 
MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. + +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct Stock { + pub s_w_id: u16, + pub s_i_id: u32, + pub s_quantity: i32, + pub s_dist_01: String, + pub s_dist_02: String, + pub s_dist_03: String, + pub s_dist_04: String, + pub s_dist_05: String, + pub s_dist_06: String, + pub s_dist_07: String, + pub s_dist_08: String, + pub s_dist_09: String, + pub s_dist_10: String, + pub s_ytd: u64, + pub s_order_cnt: u32, + pub s_remote_cnt: u32, + pub s_data: String, +} + +impl __sdk::InModule for Stock { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `Stock`. +/// +/// Provides typed access to columns for query building. +pub struct StockCols { + pub s_w_id: __sdk::__query_builder::Col, + pub s_i_id: __sdk::__query_builder::Col, + pub s_quantity: __sdk::__query_builder::Col, + pub s_dist_01: __sdk::__query_builder::Col, + pub s_dist_02: __sdk::__query_builder::Col, + pub s_dist_03: __sdk::__query_builder::Col, + pub s_dist_04: __sdk::__query_builder::Col, + pub s_dist_05: __sdk::__query_builder::Col, + pub s_dist_06: __sdk::__query_builder::Col, + pub s_dist_07: __sdk::__query_builder::Col, + pub s_dist_08: __sdk::__query_builder::Col, + pub s_dist_09: __sdk::__query_builder::Col, + pub s_dist_10: __sdk::__query_builder::Col, + pub s_ytd: __sdk::__query_builder::Col, + pub s_order_cnt: __sdk::__query_builder::Col, + pub s_remote_cnt: __sdk::__query_builder::Col, + pub s_data: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for Stock { + type Cols = StockCols; + fn cols(table_name: &'static str) -> Self::Cols { + StockCols { + s_w_id: __sdk::__query_builder::Col::new(table_name, "s_w_id"), + s_i_id: __sdk::__query_builder::Col::new(table_name, "s_i_id"), + s_quantity: 
__sdk::__query_builder::Col::new(table_name, "s_quantity"), + s_dist_01: __sdk::__query_builder::Col::new(table_name, "s_dist_01"), + s_dist_02: __sdk::__query_builder::Col::new(table_name, "s_dist_02"), + s_dist_03: __sdk::__query_builder::Col::new(table_name, "s_dist_03"), + s_dist_04: __sdk::__query_builder::Col::new(table_name, "s_dist_04"), + s_dist_05: __sdk::__query_builder::Col::new(table_name, "s_dist_05"), + s_dist_06: __sdk::__query_builder::Col::new(table_name, "s_dist_06"), + s_dist_07: __sdk::__query_builder::Col::new(table_name, "s_dist_07"), + s_dist_08: __sdk::__query_builder::Col::new(table_name, "s_dist_08"), + s_dist_09: __sdk::__query_builder::Col::new(table_name, "s_dist_09"), + s_dist_10: __sdk::__query_builder::Col::new(table_name, "s_dist_10"), + s_ytd: __sdk::__query_builder::Col::new(table_name, "s_ytd"), + s_order_cnt: __sdk::__query_builder::Col::new(table_name, "s_order_cnt"), + s_remote_cnt: __sdk::__query_builder::Col::new(table_name, "s_remote_cnt"), + s_data: __sdk::__query_builder::Col::new(table_name, "s_data"), + } + } +} + +/// Indexed column accessor struct for the table `Stock`. +/// +/// Provides typed access to indexed columns for query building. +pub struct StockIxCols {} + +impl __sdk::__query_builder::HasIxCols for Stock { + type IxCols = StockIxCols; + fn ix_cols(table_name: &'static str) -> Self::IxCols { + StockIxCols {} + } +} + +impl __sdk::__query_builder::CanBeLookupTable for Stock {} diff --git a/tools/tpcc-runner/src/module_bindings/warehouse_type.rs b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs new file mode 100644 index 00000000000..66f02971a4f --- /dev/null +++ b/tools/tpcc-runner/src/module_bindings/warehouse_type.rs @@ -0,0 +1,73 @@ +// THIS FILE IS AUTOMATICALLY GENERATED BY SPACETIMEDB. EDITS TO THIS FILE +// WILL NOT BE SAVED. MODIFY TABLES IN YOUR MODULE SOURCE CODE INSTEAD. 
+ +#![allow(unused, clippy::all)] +use spacetimedb_sdk::__codegen::{self as __sdk, __lib, __sats, __ws}; + +#[derive(__lib::ser::Serialize, __lib::de::Deserialize, Clone, PartialEq, Debug)] +#[sats(crate = __lib)] +pub struct Warehouse { + pub w_id: u16, + pub w_name: String, + pub w_street_1: String, + pub w_street_2: String, + pub w_city: String, + pub w_state: String, + pub w_zip: String, + pub w_tax_bps: i32, + pub w_ytd_cents: i64, +} + +impl __sdk::InModule for Warehouse { + type Module = super::RemoteModule; +} + +/// Column accessor struct for the table `Warehouse`. +/// +/// Provides typed access to columns for query building. +pub struct WarehouseCols { + pub w_id: __sdk::__query_builder::Col, + pub w_name: __sdk::__query_builder::Col, + pub w_street_1: __sdk::__query_builder::Col, + pub w_street_2: __sdk::__query_builder::Col, + pub w_city: __sdk::__query_builder::Col, + pub w_state: __sdk::__query_builder::Col, + pub w_zip: __sdk::__query_builder::Col, + pub w_tax_bps: __sdk::__query_builder::Col, + pub w_ytd_cents: __sdk::__query_builder::Col, +} + +impl __sdk::__query_builder::HasCols for Warehouse { + type Cols = WarehouseCols; + fn cols(table_name: &'static str) -> Self::Cols { + WarehouseCols { + w_id: __sdk::__query_builder::Col::new(table_name, "w_id"), + w_name: __sdk::__query_builder::Col::new(table_name, "w_name"), + w_street_1: __sdk::__query_builder::Col::new(table_name, "w_street_1"), + w_street_2: __sdk::__query_builder::Col::new(table_name, "w_street_2"), + w_city: __sdk::__query_builder::Col::new(table_name, "w_city"), + w_state: __sdk::__query_builder::Col::new(table_name, "w_state"), + w_zip: __sdk::__query_builder::Col::new(table_name, "w_zip"), + w_tax_bps: __sdk::__query_builder::Col::new(table_name, "w_tax_bps"), + w_ytd_cents: __sdk::__query_builder::Col::new(table_name, "w_ytd_cents"), + } + } +} + +/// Indexed column accessor struct for the table `Warehouse`. +/// +/// Provides typed access to indexed columns for query building. 
pub struct WarehouseIxCols {
    // TODO(review): `IxCol` generic arguments were stripped in transit;
    // regenerate the bindings to restore them.
    pub w_id: __sdk::__query_builder::IxCol,
}

impl __sdk::__query_builder::HasIxCols for Warehouse {
    type IxCols = WarehouseIxCols;
    fn ix_cols(table_name: &'static str) -> Self::IxCols {
        WarehouseIxCols {
            w_id: __sdk::__query_builder::IxCol::new(table_name, "w_id"),
        }
    }
}

impl __sdk::__query_builder::CanBeLookupTable for Warehouse {}

// diff --git a/tools/tpcc-runner/src/protocol.rs b/tools/tpcc-runner/src/protocol.rs
// new file mode 100644 index 00000000000..5d76fb94ea1 --- /dev/null
// +++ b/tools/tpcc-runner/src/protocol.rs @@ -0,0 +1,36 @@

//! Wire types exchanged between the tpcc-runner coordinator and its drivers.

use serde::{Deserialize, Serialize};

use crate::summary::DriverSummary;

/// Sent by a driver to announce itself and the terminal range it will run.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RegisterDriverRequest {
    pub driver_id: String,
    pub terminal_start: u32,
    pub terminals: u32,
    pub warehouse_count: u16,
}

/// Coordinator's answer to a registration attempt.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RegisterDriverResponse {
    pub accepted: bool,
}

/// Absolute run timeline, in milliseconds since the Unix epoch.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RunSchedule {
    pub run_id: String,
    pub warmup_start_ms: u64,
    pub measure_start_ms: u64,
    pub measure_end_ms: u64,
    pub stop_ms: u64,
}

/// Polled by drivers until the coordinator publishes a schedule.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ScheduleResponse {
    pub ready: bool,
    // Fix: the generic argument was stripped in transit; a ready response
    // carries the published `RunSchedule`.
    pub schedule: Option<RunSchedule>,
}

/// Final per-driver results uploaded at the end of a run.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SubmitSummaryRequest {
    pub summary: DriverSummary,
}

// diff --git a/tools/tpcc-runner/src/summary.rs b/tools/tpcc-runner/src/summary.rs
// new file mode 100644 index 00000000000..6cacce6cf76 --- /dev/null
// +++ b/tools/tpcc-runner/src/summary.rs @@ -0,0 +1,585 @@

use anyhow::{Context, Result};
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::Path;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};

use crate::module_bindings::DeliveryCompletionView;

/// Latency histogram bucket upper bounds (milliseconds), roughly logarithmic.
const HISTOGRAM_BUCKETS_MS: [u64; 16] = [
1, 2, 5, 10, 20, 50, 100, 200, 500, 1_000, 2_000, 5_000, 10_000, 20_000, 60_000, 120_000, +]; + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum TransactionKind { + NewOrder, + Payment, + OrderStatus, + Delivery, + StockLevel, +} + +impl TransactionKind { + pub const ALL: [Self; 5] = [ + Self::NewOrder, + Self::Payment, + Self::OrderStatus, + Self::Delivery, + Self::StockLevel, + ]; + + pub fn as_str(self) -> &'static str { + match self { + Self::NewOrder => "new_order", + Self::Payment => "payment", + Self::OrderStatus => "order_status", + Self::Delivery => "delivery", + Self::StockLevel => "stock_level", + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Histogram { + pub buckets_ms: Vec, + pub counts: Vec, + pub count: u64, + pub sum_ms: u64, + pub max_ms: u64, +} + +impl Default for Histogram { + fn default() -> Self { + Self { + buckets_ms: HISTOGRAM_BUCKETS_MS.to_vec(), + counts: vec![0; HISTOGRAM_BUCKETS_MS.len() + 1], + count: 0, + sum_ms: 0, + max_ms: 0, + } + } +} + +impl Histogram { + pub fn record(&mut self, value_ms: u64) { + let index = self + .buckets_ms + .iter() + .position(|upper| value_ms <= *upper) + .unwrap_or(self.buckets_ms.len()); + self.counts[index] += 1; + self.count += 1; + self.sum_ms += value_ms; + self.max_ms = self.max_ms.max(value_ms); + } + + pub fn merge(&mut self, other: &Histogram) { + self.count += other.count; + self.sum_ms += other.sum_ms; + self.max_ms = self.max_ms.max(other.max_ms); + for (left, right) in self.counts.iter_mut().zip(&other.counts) { + *left += right; + } + } + + pub fn mean_ms(&self) -> f64 { + if self.count == 0 { + 0.0 + } else { + self.sum_ms as f64 / self.count as f64 + } + } + + pub fn percentile_ms(&self, pct: f64) -> u64 { + if self.count == 0 { + return 0; + } + let wanted = ((self.count as f64) * pct).ceil() as u64; + let mut seen = 0u64; + for (idx, count) in self.counts.iter().enumerate() { + seen += *count; + if seen >= wanted { + return if idx < 
self.buckets_ms.len() { + self.buckets_ms[idx] + } else { + self.max_ms + }; + } + } + self.max_ms + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct TransactionSummary { + pub count: u64, + pub success: u64, + pub failure: u64, + pub mean_latency_ms: f64, + pub p50_latency_ms: u64, + pub p95_latency_ms: u64, + pub p99_latency_ms: u64, + pub max_latency_ms: u64, + pub histogram: Histogram, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct ConformanceSummary { + pub new_order_rollbacks: u64, + pub new_order_total: u64, + pub new_order_remote_order_lines: u64, + pub new_order_total_order_lines: u64, + pub payment_remote: u64, + pub payment_total: u64, + pub payment_by_last_name: u64, + pub order_status_by_last_name: u64, + pub order_status_total: u64, + pub delivery_queued: u64, + pub delivery_completed: u64, + pub delivery_processed_districts: u64, + pub delivery_skipped_districts: u64, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct DeliverySummary { + pub queued: u64, + pub completed: u64, + pub pending: u64, + pub processed_districts: u64, + pub skipped_districts: u64, + pub completion_mean_ms: f64, + pub completion_p50_ms: u64, + pub completion_p95_ms: u64, + pub completion_p99_ms: u64, + pub completion_max_ms: u64, + pub completion_histogram: Histogram, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct DriverSummary { + pub run_id: String, + pub driver_id: String, + pub uri: String, + pub database: String, + pub terminal_start: u32, + pub terminals: u32, + pub warehouse_count: u16, + pub warmup_secs: u64, + pub measure_secs: u64, + pub measure_start_ms: u64, + pub measure_end_ms: u64, + pub generated_at_ms: u64, + pub total_transactions: u64, + pub tpmc_like: f64, + pub transaction_mix: BTreeMap, + pub conformance: ConformanceSummary, + pub transactions: BTreeMap, + pub delivery: DeliverySummary, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct AggregateSummary { + 
pub run_id: String, + pub driver_count: usize, + pub drivers: Vec, + pub generated_at_ms: u64, + pub total_transactions: u64, + pub tpmc_like: f64, + pub transaction_mix: BTreeMap, + pub conformance: ConformanceSummary, + pub transactions: BTreeMap, + pub delivery: DeliverySummary, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +struct EventLine { + timestamp_ms: u64, + run_id: String, + driver_id: String, + terminal_id: u32, + transaction: String, + success: bool, + latency_ms: u64, + rollback: bool, + remote: bool, + by_last_name: bool, + order_line_count: u32, + remote_order_line_count: u32, + detail: Option, +} + +#[derive(Clone, Debug)] +pub struct TransactionRecord { + pub timestamp_ms: u64, + pub terminal_id: u32, + pub kind: TransactionKind, + pub success: bool, + pub latency_ms: u64, + pub rollback: bool, + pub remote: bool, + pub by_last_name: bool, + pub order_line_count: u32, + pub remote_order_line_count: u32, + pub detail: Option, +} + +#[derive(Default)] +struct TransactionAccumulator { + success: u64, + failure: u64, + histogram: Histogram, +} + +impl TransactionAccumulator { + fn record(&mut self, success: bool, latency_ms: u64) { + if success { + self.success += 1; + } else { + self.failure += 1; + } + self.histogram.record(latency_ms); + } + + fn to_summary(&self) -> TransactionSummary { + TransactionSummary { + count: self.success + self.failure, + success: self.success, + failure: self.failure, + mean_latency_ms: self.histogram.mean_ms(), + p50_latency_ms: self.histogram.percentile_ms(0.50), + p95_latency_ms: self.histogram.percentile_ms(0.95), + p99_latency_ms: self.histogram.percentile_ms(0.99), + max_latency_ms: self.histogram.max_ms, + histogram: self.histogram.clone(), + } + } +} + +pub struct MetricsCollector { + run_id: String, + driver_id: String, + writer: BufWriter, + by_kind: BTreeMap<&'static str, TransactionAccumulator>, + conformance: ConformanceSummary, + delivery_completion_histogram: Histogram, +} + +#[derive(Clone)] 
/// Cheaply clonable, thread-safe handle over the driver's `MetricsCollector`.
pub struct SharedMetrics {
    // Fix: generic arguments restored (stripped in transit); built below as
    // `Arc::new(Mutex::new(collector))`.
    inner: Arc<Mutex<MetricsCollector>>,
}

/// Run metadata copied verbatim into the final `DriverSummary`.
#[derive(Clone, Debug)]
pub struct DriverSummaryMeta {
    pub run_id: String,
    pub driver_id: String,
    pub uri: String,
    pub database: String,
    pub terminal_start: u32,
    pub terminals: u32,
    pub warehouse_count: u16,
    pub warmup_secs: u64,
    pub measure_secs: u64,
    pub measure_start_ms: u64,
    pub measure_end_ms: u64,
}

impl SharedMetrics {
    /// Create the collector, opening the JSONL event log at `path`.
    ///
    /// # Errors
    /// Fails if the event-log file cannot be created.
    // Fix: return type restored to `Result<Self>` (generic stripped in transit).
    pub fn create(run_id: &str, driver_id: &str, path: &Path) -> Result<Self> {
        let file = File::create(path).with_context(|| format!("failed to create {}", path.display()))?;
        let collector = MetricsCollector {
            run_id: run_id.to_string(),
            driver_id: driver_id.to_string(),
            writer: BufWriter::new(file),
            // Pre-register every kind so `record` can unwrap the lookup.
            by_kind: TransactionKind::ALL
                .into_iter()
                .map(|kind| (kind.as_str(), TransactionAccumulator::default()))
                .collect(),
            conformance: ConformanceSummary::default(),
            delivery_completion_histogram: Histogram::default(),
        };
        Ok(Self {
            inner: Arc::new(Mutex::new(collector)),
        })
    }

    /// Record one finished transaction (locks the collector).
    pub fn record(&self, event: TransactionRecord) -> Result<()> {
        let mut collector = self.inner.lock();
        collector.record(event)
    }

    /// Record a delivery completion observed via the module's view table.
    pub fn record_delivery_completion(&self, completion: &DeliveryCompletionView) {
        let mut collector = self.inner.lock();
        collector.record_delivery_completion(completion);
    }

    /// Number of delivery transactions queued so far.
    pub fn delivery_queued(&self) -> u64 {
        self.inner.lock().conformance.delivery_queued
    }

    /// Flush the event log and produce the final summary.
    // Fix: return type restored to `Result<DriverSummary>` (generic stripped
    // in transit; grounded by `MetricsCollector::finalize`).
    pub fn finalize(self, meta: DriverSummaryMeta) -> Result<DriverSummary> {
        self.inner.lock().finalize(meta)
    }
}

impl MetricsCollector {
    /// Append the event to the JSONL log and fold it into the accumulators
    /// and conformance counters.
    fn record(&mut self, event: TransactionRecord) -> Result<()> {
        let line = EventLine {
            timestamp_ms: event.timestamp_ms,
            run_id: self.run_id.clone(),
            driver_id: self.driver_id.clone(),
            terminal_id: event.terminal_id,
            transaction: event.kind.as_str().to_string(),
            success: event.success,
            latency_ms: event.latency_ms,
            rollback: event.rollback,
            remote: event.remote,
            by_last_name: event.by_last_name,
            order_line_count: event.order_line_count,
            remote_order_line_count: event.remote_order_line_count,
            detail: event.detail.clone(),
        };
        serde_json::to_writer(&mut self.writer, &line)?;
        self.writer.write_all(b"\n")?;

        let accumulator = self
            .by_kind
            .get_mut(event.kind.as_str())
            .expect("all transaction kinds registered");
        accumulator.record(event.success, event.latency_ms);

        match event.kind {
            TransactionKind::NewOrder => {
                self.conformance.new_order_total += 1;
                self.conformance.new_order_total_order_lines += u64::from(event.order_line_count);
                self.conformance.new_order_remote_order_lines += u64::from(event.remote_order_line_count);
                if event.rollback {
                    self.conformance.new_order_rollbacks += 1;
                }
            }
            TransactionKind::Payment => {
                self.conformance.payment_total += 1;
                if event.remote {
                    self.conformance.payment_remote += 1;
                }
                if event.by_last_name {
                    self.conformance.payment_by_last_name += 1;
                }
            }
            TransactionKind::OrderStatus => {
                self.conformance.order_status_total += 1;
                if event.by_last_name {
                    self.conformance.order_status_by_last_name += 1;
                }
            }
            TransactionKind::Delivery => {
                self.conformance.delivery_queued += 1;
            }
            TransactionKind::StockLevel => {}
        }
        Ok(())
    }

    fn record_delivery_completion(&mut self, completion: &DeliveryCompletionView) {
        self.conformance.delivery_completed += 1;
        self.conformance.delivery_processed_districts += u64::from(completion.processed_districts);
        self.conformance.delivery_skipped_districts += u64::from(completion.skipped_districts);
        // Queue→completion lag; clamp at zero in case of clock skew, then
        // convert microseconds to milliseconds.
        let lag_ms = completion
            .completed_at
            .to_micros_since_unix_epoch()
            .saturating_sub(completion.queued_at.to_micros_since_unix_epoch())
            .max(0) as u64
            / 1_000;
        self.delivery_completion_histogram.record(lag_ms);
    }

    /// Flush the log and build the `DriverSummary` (per-kind roll-ups, mix
    /// percentages, tpmC-like rate, delivery stats).
    // Fix: return type restored to `Result<DriverSummary>` (stripped in transit).
    fn finalize(&mut self, meta: DriverSummaryMeta) -> Result<DriverSummary> {
        self.writer.flush()?;

        let mut transactions = BTreeMap::new();
        let mut total_transactions = 0u64;
        for kind in TransactionKind::ALL {
            let summary = self
                .by_kind
                .get(kind.as_str())
                .expect("transaction kind exists")
                .to_summary();
            total_transactions += summary.count;
            transactions.insert(kind.as_str().to_string(), summary);
        }

        // Mix percentages (0..=100) per kind.
        let mut mix = BTreeMap::new();
        for kind in TransactionKind::ALL {
            let count = transactions
                .get(kind.as_str())
                .map(|summary| summary.count)
                .unwrap_or(0);
            let ratio = if total_transactions == 0 {
                0.0
            } else {
                (count as f64) * 100.0 / (total_transactions as f64)
            };
            mix.insert(kind.as_str().to_string(), ratio);
        }

        // tpmC-like: successful NewOrder transactions per measurement minute.
        let measure_minutes = if meta.measure_secs == 0 {
            0.0
        } else {
            meta.measure_secs as f64 / 60.0
        };
        let new_order_success = transactions
            .get(TransactionKind::NewOrder.as_str())
            .map(|summary| summary.success)
            .unwrap_or(0);
        let tpmc_like = if measure_minutes == 0.0 {
            0.0
        } else {
            new_order_success as f64 / measure_minutes
        };

        let delivery_completed = self.conformance.delivery_completed;
        let delivery_queued = self.conformance.delivery_queued;
        let delivery = DeliverySummary {
            queued: delivery_queued,
            completed: delivery_completed,
            pending: delivery_queued.saturating_sub(delivery_completed),
            processed_districts: self.conformance.delivery_processed_districts,
            skipped_districts: self.conformance.delivery_skipped_districts,
            completion_mean_ms: self.delivery_completion_histogram.mean_ms(),
            completion_p50_ms: self.delivery_completion_histogram.percentile_ms(0.50),
            completion_p95_ms: self.delivery_completion_histogram.percentile_ms(0.95),
            completion_p99_ms: self.delivery_completion_histogram.percentile_ms(0.99),
            completion_max_ms: self.delivery_completion_histogram.max_ms,
            completion_histogram: self.delivery_completion_histogram.clone(),
        };

        Ok(DriverSummary {
            run_id: meta.run_id,
            driver_id: meta.driver_id,
            uri: meta.uri,
            database: meta.database,
            terminal_start: meta.terminal_start,
            terminals: meta.terminals,
            warehouse_count: meta.warehouse_count,
            warmup_secs: meta.warmup_secs,
            measure_secs: meta.measure_secs,
            measure_start_ms: meta.measure_start_ms,
            measure_end_ms: meta.measure_end_ms,
            generated_at_ms: now_millis(),
            total_transactions,
            tpmc_like,
            transaction_mix: mix,
            conformance: self.conformance.clone(),
            transactions,
            delivery,
        })
    }
}

/// Merge all driver summaries of one run into a single `AggregateSummary`.
///
/// NOTE(review): `measure_secs` is taken from the first driver only — assumes
/// all drivers share one schedule (they do: the coordinator publishes a single
/// `RunSchedule`); confirm if heterogeneous schedules are ever allowed.
pub fn aggregate_summaries(run_id: String, summaries: &[DriverSummary]) -> AggregateSummary {
    // Fix: map type restored (stripped in transit).
    let mut by_kind: BTreeMap<String, TransactionAccumulator> = TransactionKind::ALL
        .into_iter()
        .map(|kind| (kind.as_str().to_string(), TransactionAccumulator::default()))
        .collect();
    let mut total_transactions = 0u64;
    let mut conformance = ConformanceSummary::default();
    let mut delivery_histogram = Histogram::default();
    let mut driver_names = Vec::with_capacity(summaries.len());

    for summary in summaries {
        driver_names.push(summary.driver_id.clone());
        total_transactions += summary.total_transactions;
        conformance.new_order_rollbacks += summary.conformance.new_order_rollbacks;
        conformance.new_order_total += summary.conformance.new_order_total;
        conformance.new_order_remote_order_lines += summary.conformance.new_order_remote_order_lines;
        conformance.new_order_total_order_lines += summary.conformance.new_order_total_order_lines;
        conformance.payment_remote += summary.conformance.payment_remote;
        conformance.payment_total += summary.conformance.payment_total;
        conformance.payment_by_last_name += summary.conformance.payment_by_last_name;
        conformance.order_status_by_last_name += summary.conformance.order_status_by_last_name;
        conformance.order_status_total += summary.conformance.order_status_total;
        conformance.delivery_queued += summary.conformance.delivery_queued;
        conformance.delivery_completed += summary.conformance.delivery_completed;
        conformance.delivery_processed_districts += summary.conformance.delivery_processed_districts;
        conformance.delivery_skipped_districts += summary.conformance.delivery_skipped_districts;
        delivery_histogram.merge(&summary.delivery.completion_histogram);

        for (name, txn) in &summary.transactions {
            let acc = by_kind.get_mut(name).expect("kind exists");
            acc.success += txn.success;
            acc.failure += txn.failure;
            acc.histogram.merge(&txn.histogram);
        }
    }

    let mut transactions = BTreeMap::new();
    let mut mix = BTreeMap::new();
    for (name, acc) in by_kind {
        let summary = acc.to_summary();
        let ratio = if total_transactions == 0 {
            0.0
        } else {
            (summary.count as f64) * 100.0 / (total_transactions as f64)
        };
        mix.insert(name.clone(), ratio);
        transactions.insert(name, summary);
    }

    let measure_secs = summaries.first().map(|summary| summary.measure_secs).unwrap_or(0);
    let measure_minutes = if measure_secs == 0 {
        0.0
    } else {
        measure_secs as f64 / 60.0
    };
    let tpmc_like = if measure_minutes == 0.0 {
        0.0
    } else {
        transactions
            .get(TransactionKind::NewOrder.as_str())
            .map(|summary| summary.success as f64 / measure_minutes)
            .unwrap_or(0.0)
    };

    AggregateSummary {
        run_id,
        driver_count: summaries.len(),
        drivers: driver_names,
        generated_at_ms: now_millis(),
        total_transactions,
        tpmc_like,
        transaction_mix: mix,
        conformance: conformance.clone(),
        transactions,
        delivery: DeliverySummary {
            queued: conformance.delivery_queued,
            completed: conformance.delivery_completed,
            pending: conformance
                .delivery_queued
                .saturating_sub(conformance.delivery_completed),
            processed_districts: conformance.delivery_processed_districts,
            skipped_districts: conformance.delivery_skipped_districts,
            completion_mean_ms: delivery_histogram.mean_ms(),
            completion_p50_ms: delivery_histogram.percentile_ms(0.50),
            completion_p95_ms: delivery_histogram.percentile_ms(0.95),
            completion_p99_ms: delivery_histogram.percentile_ms(0.99),
            completion_max_ms: delivery_histogram.max_ms,
            completion_histogram: delivery_histogram,
        },
    }
}

/// Serialize `value` as pretty-printed JSON to `path`.
// Fix: the generic parameter list was stripped in transit; restored from the
// `serde_json::to_writer_pretty` bound.
pub fn write_json<T: Serialize>(path: &Path, value: &T) -> Result<()> {
    let file =
File::create(path).with_context(|| format!("failed to create {}", path.display()))?;
    serde_json::to_writer_pretty(file, value).with_context(|| format!("failed to write {}", path.display()))
}

/// Milliseconds since the Unix epoch. Panics only if the system clock is set
/// before 1970 (treated as an unrecoverable environment bug).
pub fn now_millis() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock before unix epoch")
        .as_millis() as u64
}

// diff --git a/tools/tpcc-runner/src/tpcc.rs b/tools/tpcc-runner/src/tpcc.rs
// new file mode 100644 index 00000000000..384d615e312 --- /dev/null
// +++ b/tools/tpcc-runner/src/tpcc.rs @@ -0,0 +1,160 @@

//! TPC-C input generation helpers (terminal assignment, NURand, strings,
//! keying/think times).

use rand::Rng;
use std::time::Duration;

use crate::summary::TransactionKind;

pub const DISTRICTS_PER_WAREHOUSE: u8 = 10;
pub const CUSTOMERS_PER_DISTRICT: u32 = 3_000;
pub const ITEMS: u32 = 100_000;
pub const NEW_ORDER_START: u32 = 2_101;

/// TPC-C last-name syllables; a last name is three syllables indexed by the
/// digits of a number in 0..=999.
const LAST_NAME_PARTS: [&str; 10] = [
    "BAR", "OUGHT", "ABLE", "PRI", "PRES", "ESE", "ANTI", "CALLY", "ATION", "EING",
];

/// Per-run NURand C constants (one per NURand domain).
#[derive(Clone, Debug)]
pub struct RunConstants {
    pub c_last: u32,
    pub c_id: u32,
    pub order_line_item: u32,
}

/// A terminal's fixed home warehouse/district.
#[derive(Clone, Copy, Debug)]
pub struct TerminalAssignment {
    pub terminal_id: u32,
    pub warehouse_id: u16,
    pub district_id: u8,
}

/// Map a 1-based terminal id onto (warehouse, district), ten terminals per
/// warehouse. Returns `None` for terminal 0 or ids beyond the configured
/// warehouse count.
pub fn assign_terminal(terminal_id: u32, warehouse_count: u16) -> Option<TerminalAssignment> {
    let zero = terminal_id.checked_sub(1)?;
    let warehouse_zero = zero / u32::from(DISTRICTS_PER_WAREHOUSE);
    if warehouse_zero >= u32::from(warehouse_count) {
        return None;
    }
    let district_zero = zero % u32::from(DISTRICTS_PER_WAREHOUSE);
    Some(TerminalAssignment {
        terminal_id,
        // Casts cannot truncate: warehouse_zero < warehouse_count <= u16::MAX
        // and district_zero < 10.
        warehouse_id: (warehouse_zero + 1) as u16,
        district_id: (district_zero + 1) as u8,
    })
}

/// Pick the next transaction with the weights 45/43/4/4/4
/// (NewOrder/Payment/OrderStatus/Delivery/StockLevel).
// Fix: the generic parameter list was stripped in transit; restored from the
// `rand::Rng` usage.
pub fn choose_transaction<R: Rng>(rng: &mut R) -> TransactionKind {
    let roll = rng.random_range(1..=100);
    match roll {
        1..=45 => TransactionKind::NewOrder,
        46..=88 => TransactionKind::Payment,
        89..=92 => TransactionKind::OrderStatus,
        93..=96 => TransactionKind::Delivery,
        _ => TransactionKind::StockLevel,
    }
}

pub fn
generate_run_constants<R: Rng>(rng: &mut R) -> RunConstants {
    // Fix (here and below): `<R: Rng>` parameter lists restored — they were
    // stripped in transit, leaving `R` undeclared.
    RunConstants {
        c_last: rng.random_range(0..=255),
        c_id: rng.random_range(0..=1_023),
        order_line_item: rng.random_range(0..=8_191),
    }
}

/// TPC-C NURand(A, x, y): non-uniform random in `x..=y` skewed by OR-ing two
/// uniform draws, with per-run constant `c`.
pub fn nurand<R: Rng>(rng: &mut R, a: u32, x: u32, y: u32, c: u32) -> u32 {
    (((rng.random_range(0..=a) | rng.random_range(x..=y)) + c) % (y - x + 1)) + x
}

/// Non-uniform customer id in `1..=CUSTOMERS_PER_DISTRICT` (NURand A=1023).
pub fn customer_id<R: Rng>(rng: &mut R, constants: &RunConstants) -> u32 {
    nurand(rng, 1_023, 1, CUSTOMERS_PER_DISTRICT, constants.c_id)
}

/// Non-uniform item id in `1..=ITEMS` (NURand A=8191).
pub fn item_id<R: Rng>(rng: &mut R, constants: &RunConstants) -> u32 {
    nurand(rng, 8_191, 1, ITEMS, constants.order_line_item)
}

/// Random syllable-based last name from NURand(255, 0, 999).
pub fn customer_last_name<R: Rng>(rng: &mut R, constants: &RunConstants) -> String {
    make_last_name(nurand(rng, 255, 0, 999, constants.c_last))
}

/// Build a last name from the three decimal digits of `num` (0..=999),
/// one syllable per digit.
pub fn make_last_name(num: u32) -> String {
    let hundreds = ((num / 100) % 10) as usize;
    let tens = ((num / 10) % 10) as usize;
    let ones = (num % 10) as usize;
    format!(
        "{}{}{}",
        LAST_NAME_PARTS[hundreds], LAST_NAME_PARTS[tens], LAST_NAME_PARTS[ones]
    )
}

/// Random uppercase A-Z string with length in `min_len..=max_len`.
pub fn alpha_string<R: Rng>(rng: &mut R, min_len: usize, max_len: usize) -> String {
    let len = rng.random_range(min_len..=max_len);
    (0..len).map(|_| (b'A' + rng.random_range(0..26)) as char).collect()
}

/// Random digit string with length in `min_len..=max_len`.
pub fn numeric_string<R: Rng>(rng: &mut R, min_len: usize, max_len: usize) -> String {
    let len = rng.random_range(min_len..=max_len);
    (0..len).map(|_| (b'0' + rng.random_range(0..10)) as char).collect()
}

/// Random mixed letter/digit string with length in `min_len..=max_len`.
pub fn alpha_numeric_string<R: Rng>(rng: &mut R, min_len: usize, max_len: usize) -> String {
    let len = rng.random_range(min_len..=max_len);
    (0..len)
        .map(|_| {
            if rng.random_bool(0.5) {
                (b'A' + rng.random_range(0..26)) as char
            } else {
                (b'0' + rng.random_range(0..10)) as char
            }
        })
        .collect()
}

/// TPC-C zip code: four random digits followed by the literal "11111".
pub fn zip_code<R: Rng>(rng: &mut R) -> String {
    format!("{}11111", numeric_string(rng, 4, 4))
}

/// Random data string; with 10% probability the literal "ORIGINAL" is
/// spliced in at a random offset (TPC-C stock/item data rule).
pub fn maybe_with_original<R: Rng>(rng: &mut R, min_len: usize, max_len: usize) -> String {
    let mut data = alpha_numeric_string(rng, min_len, max_len);
if rng.random_bool(0.10) && data.len() >= 8 { + let start = rng.random_range(0..=(data.len() - 8)); + data.replace_range(start..start + 8, "ORIGINAL"); + } + data +} + +pub fn keying_time(kind: TransactionKind, scale: f64) -> Duration { + scaled_duration( + match kind { + TransactionKind::NewOrder => 18.0, + TransactionKind::Payment => 3.0, + TransactionKind::OrderStatus => 2.0, + TransactionKind::Delivery => 2.0, + TransactionKind::StockLevel => 2.0, + }, + scale, + ) +} + +pub fn think_time(kind: TransactionKind, scale: f64, rng: &mut R) -> Duration { + let mean_secs = match kind { + TransactionKind::NewOrder => 12.0, + TransactionKind::Payment => 12.0, + TransactionKind::OrderStatus => 10.0, + TransactionKind::Delivery => 5.0, + TransactionKind::StockLevel => 5.0, + }; + if scale <= 0.0 { + return Duration::ZERO; + } + let mean_secs = mean_secs * scale; + let uniform = rng.random_range(f64::MIN_POSITIVE..1.0); + let sample = (-mean_secs * uniform.ln()).min(mean_secs * 10.0); + Duration::from_secs_f64(sample) +} + +fn scaled_duration(base_secs: f64, scale: f64) -> Duration { + if scale <= 0.0 { + Duration::ZERO + } else { + Duration::from_secs_f64(base_secs * scale) + } +}