3 changes: 2 additions & 1 deletion Cargo.toml
@@ -15,12 +15,13 @@ bitcoin-payment-instructions = { version = "0.5.0", default-features = false, fe
"http",
] }
# Branch: https://github.com/moneydevkit/ldk-node/commits/lsp-0.7.0_accept-underpaying-htlcs_with_timing_logs
ldk-node = { default-features = false, git = "https://github.com/moneydevkit/ldk-node.git", rev = "5baa1f83a13407818b069b1f990157c8761eb982" }
ldk-node = { default-features = false, git = "https://github.com/moneydevkit/ldk-node.git", rev = "5dce44b6e795560bbf62f49d3648308ce88a0586" }
#ldk-node = { path = "../ldk-node" }

napi = { version = "2", features = ["napi4"] }
napi-derive = "2"
tokio = { version = "1", features = ["rt-multi-thread"] }
tokio-util = { version = "0.7", default-features = false }
writeable = { version = "=0.6.2", features = ["alloc"] }

[build-dependencies]
26 changes: 24 additions & 2 deletions index.d.ts
@@ -40,6 +40,18 @@ export interface MdkNodeOptions {
lspNodeId: string
lspAddress: string
scoringParamOverrides?: ScoringParamOverrides
splice?: SpliceConfig
}
/**
* Configuration for the auto-splice manager. The manager wakes up every
* `poll_interval_secs`, reads the spendable on-chain balance, and splices it
* into the largest usable LSP channel when one is available.
*/
export interface SpliceConfig {
/** Enable the auto-splice background manager. Default: true. */
enabled?: boolean
/** Poll interval in seconds. Default: 30. */
pollIntervalSecs?: number
}
export interface PaymentMetadata {
bolt11: string
@@ -107,7 +119,12 @@ export declare class MdkNode {
getNodeId(): string
start(): void
stop(): void
/** Start the node and sync wallets. Call once before polling for events. */
/**
* Start the node and sync wallets. Call once before polling for events.
*
* If `splice.enabled` is set on construction (the default), also spawns
* the auto-splice background task on the dedicated splice runtime.
*/
startReceiving(): void
/**
* Get the next payment event without ACKing it.
@@ -120,7 +137,12 @@
* Must be called after next_event() returns an event, before calling next_event() again.
*/
ackEvent(): void
/** Stop the node. Call when done polling. */
/**
* Stop the node. Call when done polling.
*
* Tears down the splice manager before stopping the node so the loop
* never sees a stopped node mid-tick.
*/
stopReceiving(): void
syncWallets(): void
getBalance(): number
129 changes: 125 additions & 4 deletions src/lib.rs
@@ -13,7 +13,7 @@ use std::{
fmt::Write,
str::FromStr,
sync::{
Arc, OnceLock, RwLock,
Arc, Mutex, OnceLock, RwLock,
atomic::{AtomicU8, Ordering},
},
time::{Duration, Instant},
@@ -52,10 +52,14 @@ use ldk_node::{
payment::PaymentKind,
};
use tokio::runtime::Runtime;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;

#[macro_use]
extern crate napi_derive;

mod splice_manager;

/// Polling interval for event loops and state checks.
const POLL_INTERVAL: Duration = Duration::from_millis(10);

@@ -296,6 +300,44 @@ pub struct MdkNodeOptions {
pub lsp_node_id: String,
pub lsp_address: String,
pub scoring_param_overrides: Option<ScoringParamOverrides>,
pub splice: Option<SpliceConfig>,
}

/// Configuration for the auto-splice manager. The manager wakes up every
/// `poll_interval_secs`, reads the spendable on-chain balance, and splices it
/// into the largest usable LSP channel when one is available.
#[napi(object)]
pub struct SpliceConfig {
/// Enable the auto-splice background manager. Default: true.
pub enabled: Option<bool>,
/// Poll interval in seconds. Default: 30.
pub poll_interval_secs: Option<u32>,
}

/// Resolved splice config with defaults applied. Internal.
#[derive(Debug, Clone, Copy)]
pub(crate) struct ResolvedSpliceConfig {
pub(crate) enabled: bool,
pub(crate) poll_interval: Duration,
}

impl ResolvedSpliceConfig {
fn from_options(cfg: Option<SpliceConfig>) -> Self {
let default = Self {
enabled: true,
poll_interval: Duration::from_secs(30),
};
match cfg {
None => default,
Some(c) => Self {
enabled: c.enabled.unwrap_or(default.enabled),
poll_interval: c
.poll_interval_secs
.map(|s| Duration::from_secs(s as u64))
.unwrap_or(default.poll_interval),
Comment on lines +335 to +337

P1: Reject zero poll interval for splice manager

Clamp or validate poll_interval_secs to a positive value before building the Duration: a user-provided 0 currently becomes Duration::from_secs(0), and the manager loop then repeatedly hits sleep(0) and tick() as fast as possible. In production this can create a hot loop that pegs CPU and repeatedly calls wallet/channel APIs (and potentially splice RPCs) instead of polling.
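One illustrative fix, as a drop-in for the three lines commented on above (the 1-second floor is an arbitrary choice, not a value from this PR):

    poll_interval: c
        .poll_interval_secs
        .map(|s| Duration::from_secs(u64::from(s).max(1)))
        .unwrap_or(default.poll_interval),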

},
}
}
}
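For reference, the defaults above imply the behavior below, sketched as a hypothetical unit test (no such test exists in this diff):

#[cfg(test)]
mod splice_config_tests {
    use super::*;

    #[test]
    fn defaults_apply_when_unset() {
        // No config at all: both defaults apply.
        let resolved = ResolvedSpliceConfig::from_options(None);
        assert!(resolved.enabled);
        assert_eq!(resolved.poll_interval, Duration::from_secs(30));

        // Partial config: the unset field keeps its default.
        let resolved = ResolvedSpliceConfig::from_options(Some(SpliceConfig {
            enabled: Some(false),
            poll_interval_secs: None,
        }));
        assert!(!resolved.enabled);
        assert_eq!(resolved.poll_interval, Duration::from_secs(30));
    }
}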

#[napi(object)]
@@ -365,8 +407,21 @@ pub struct NodeChannel {

#[napi]
pub struct MdkNode {
node: Option<Node>,
node: Option<Arc<Node>>,
network: Network,
/// Cached LSP pubkey. Used by the splice manager to filter eligible
/// channels by counterparty.
lsp_pubkey: PublicKey,
splice_cfg: ResolvedSpliceConfig,
/// One-worker tokio runtime dedicated to the splice manager.
splice_runtime: Runtime,
/// `Some` while a splice manager is running, `None` otherwise.
splice_task: Mutex<Option<SpliceTask>>,
}

struct SpliceTask {
shutdown: CancellationToken,
join: JoinHandle<()>,
}

#[napi]
@@ -470,9 +525,25 @@ impl MdkNode {
.build_with_vss_store_and_fixed_headers(options.vss_url, vss_identifier, vss_headers)
.map_err(|err| napi::Error::from_reason(err.to_string()))?;

let splice_cfg = ResolvedSpliceConfig::from_options(options.splice);

// One self-driving worker is enough; the manager sleeps between ticks.
let splice_runtime = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.thread_name("mdk-splice")
.enable_all()
.build()
.map_err(|e| {
napi::Error::from_reason(format!("failed to build splice runtime: {e}"))
})?;

Ok(Self {
node: Some(node),
node: Some(Arc::new(node)),
network,
lsp_pubkey: lsp_node_id,
splice_cfg,
splice_runtime,
splice_task: Mutex::new(None),
})
}

@@ -481,6 +552,29 @@
self.node.as_ref().expect("MdkNode has been destroyed")
}

/// Clone the inner `Arc<Node>` for handing to background tasks. Panics if
/// the node has been destroyed.
fn node_arc(&self) -> Arc<Node> {
Arc::clone(self.node.as_ref().expect("MdkNode has been destroyed"))
}

/// Cancel the splice task (if any) and block until it exits.
///
/// Bounded by however long the in-flight `tick()` takes to return — usually
/// trivial, but a tick mid-`splice_in` is blocked on an LSP round-trip.
///
/// Must be called from a non-tokio context (JS thread is fine);
/// `block_on` panics from inside a runtime.
fn shutdown_splice_task(&self) {
let task = self.splice_task.lock().unwrap().take();
if let Some(SpliceTask { shutdown, join }) = task {
shutdown.cancel();
if let Err(e) = self.splice_runtime.block_on(join) {
eprintln!("[lightning-js] Splice task ended abnormally: {e}");
}
}
}

/// Destroy the node, dropping the inner Rust Node and its tokio runtime immediately.
/// This prevents zombie processes on serverless platforms where GC is non-deterministic.
/// After calling destroy(), any further method calls on this node will panic.
@@ -492,6 +586,9 @@
/// them and sending a webhook.
#[napi]
pub fn destroy(&mut self) -> napi::Result<()> {
// Drop the splice task first so its Arc<Node> is released before we drop
// the inner Node.
self.shutdown_splice_task();
if let Some(node) = self.node.take() {
node.disconnect_all_peers();
let _ = node.stop();
@@ -522,6 +619,9 @@
}

/// Start the node and sync wallets. Call once before polling for events.
///
/// If `splice.enabled` is set on construction (the default), also spawns
/// the auto-splice background task on the dedicated splice runtime.
#[napi]
pub fn start_receiving(&self) -> napi::Result<()> {
self.node().start().map_err(|e| {
@@ -533,7 +633,24 @@
eprintln!("[lightning-js] Failed to sync wallets in start_receiving: {e}");
let _ = self.node().stop();
napi::Error::from_reason(format!("Failed to sync: {e}"))
})
})?;

if self.splice_cfg.enabled {
// Defensive: if a prior session leaked a task (or start_receiving is
// double-invoked), cancel + join the previous one before spawning.
self.shutdown_splice_task();
let shutdown = CancellationToken::new();
let join = splice_manager::spawn(
self.node_arc(),
self.lsp_pubkey,
self.splice_cfg,
shutdown.clone(),
self.splice_runtime.handle(),
);
*self.splice_task.lock().unwrap() = Some(SpliceTask { shutdown, join });
Comment on lines +638 to +650

P2: Tie auto-splice task lifecycle to all node stop paths

The splice worker is started from start_receiving but only canceled in stop_receiving/destroy, so callers that use the existing stop() path after start_receiving() leave the background task alive. That task can continue polling and attempting work against a stopped node, which is surprising for a stop operation and can cause unintended background activity until destroy is called.
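A minimal fix, sketched under the assumption that stop() elsewhere in lib.rs is a thin wrapper over Node::stop (its body is not shown in this diff):

    #[napi]
    pub fn stop(&self) -> napi::Result<()> {
        // Mirror stop_receiving(): tear down the splice task on this path too.
        self.shutdown_splice_task();
        self
            .node()
            .stop()
            .map_err(|e| napi::Error::from_reason(e.to_string()))
    }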

}

Ok(())
}

/// Get the next payment event without ACKing it.
@@ -645,8 +762,12 @@
}

/// Stop the node. Call when done polling.
///
/// Tears down the splice manager before stopping the node so the loop
/// never sees a stopped node mid-tick.
#[napi]
pub fn stop_receiving(&self) -> napi::Result<()> {
self.shutdown_splice_task();
self
.node()
.stop()
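src/splice_manager.rs itself does not appear in this diff view. For orientation, here is a plausible shape for splice_manager::spawn under the call-site signature used in start_receiving above. This is a hypothetical sketch: the real balance/channel/splice calls are stubbed out rather than guessed, and the PublicKey import path assumes ldk-node's bitcoin re-export.

use std::sync::Arc;

use ldk_node::Node;
use ldk_node::bitcoin::secp256k1::PublicKey; // import path assumed
use tokio::runtime::Handle;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;

use crate::ResolvedSpliceConfig;

pub(crate) fn spawn(
    node: Arc<Node>,
    lsp_pubkey: PublicKey,
    cfg: ResolvedSpliceConfig,
    shutdown: CancellationToken,
    handle: &Handle,
) -> JoinHandle<()> {
    handle.spawn(async move {
        loop {
            tokio::select! {
                // stop_receiving()/destroy() cancel the token; exit promptly.
                _ = shutdown.cancelled() => break,
                _ = tokio::time::sleep(cfg.poll_interval) => {
                    // One tick: read the spendable on-chain balance and, when a
                    // usable channel with lsp_pubkey exists, splice into the
                    // largest one. Stubbed here; the real calls are not shown
                    // in this diff.
                    tick(&node, &lsp_pubkey).await;
                }
            }
        }
    })
}

async fn tick(_node: &Node, _lsp_pubkey: &PublicKey) {}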