@@ -728,15 +728,17 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
 #[cfg(test)]
 mod tests {
 	use bitcoin::BlockHeader;
-	use ::{check_added_monitors, check_closed_broadcast, check_closed_event, expect_payment_sent};
-	use ::{get_local_commitment_txn, get_route_and_payment_hash, unwrap_send_err};
+	use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
+	use ::{expect_payment_sent, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
+	use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
 	use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
 	use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
 	use ln::channelmanager::PaymentSendFailure;
 	use ln::features::InitFeatures;
 	use ln::functional_test_utils::*;
+	use ln::msgs::ChannelMessageHandler;
 	use util::errors::APIError;
-	use util::events::{ClosureReason, MessageSendEventsProvider};
+	use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
 	use util::test_utils::{OnRegisterOutput, TxOutReference};
 
742744 /// Tests that in-block dependent transactions are processed by `block_connected` when not
@@ -782,6 +784,81 @@ mod tests {
 		nodes[1].node.get_and_clear_pending_events();
 	}
 
+	#[test]
+	fn test_async_ooo_offchain_updates() {
+		// Test that if we have multiple offchain updates being persisted and they complete
+		// out-of-order, the ChainMonitor waits until all have completed before informing the
+		// ChannelManager.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+		// Route two payments to be claimed at the same time.
+		let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+		let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+
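+		// Have the persister return TemporaryFailure so the ChannelMonitorUpdates generated by the
+		// two claims below are left pending until we complete them via channel_monitor_updated.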
+		chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
+		chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+		nodes[1].node.claim_funds(payment_preimage_1);
+		check_added_monitors!(nodes[1], 1);
+		nodes[1].node.claim_funds(payment_preimage_2);
+		check_added_monitors!(nodes[1], 1);
+
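+		// Later persistence calls succeed again, but the two updates above still have to be
+		// completed explicitly.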
+		chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+
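+		// Both pending updates should have been recorded against the one channel's funding outpoint.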
+		let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
+		assert_eq!(persistences.len(), 1);
+		let (funding_txo, updates) = persistences.iter().next().unwrap();
+		assert_eq!(updates.len(), 2);
+
+		// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
+		// fail either way but if it fails intermittently it's depending on the ordering of updates.
+		let mut update_iter = updates.iter();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+		assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
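+		// Only now, with both pending updates completed, does the ChainMonitor let the claims
+		// through to the ChannelManager, making node B's update_fulfill_htlc messages available.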
+
+		// Now manually walk the commitment signed dance - because we claimed two payments
+		// back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
+
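+		// Node B's first update_fulfill_htlc/commitment_signed covers payment 1; the fulfill for
+		// payment 2 only arrives in a second round, after node A's first revoke_and_ack.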
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+		expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
+		check_added_monitors!(nodes[1], 1);
+		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+		expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+		expect_payment_path_successful!(nodes[0]);
+		check_added_monitors!(nodes[0], 1);
+		let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+		check_added_monitors!(nodes[1], 1);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+		expect_payment_path_successful!(nodes[0]);
+		check_added_monitors!(nodes[0], 1);
+	}
+
 	fn do_chainsync_pauses_events(block_timeout: bool) {
 		// When a chainsync monitor update occurs, any MonitorUpdates should be held before being
 		// passed upstream to a `ChannelManager` via `Watch::release_pending_monitor_events`. This