@@ -639,8 +639,8 @@ where C::Target: chain::Filter,
 				let monitor = &monitor_state.monitor;
 				log_trace!(self.logger, "Updating ChannelMonitor for channel {}", log_funding_info!(monitor));
 				let update_res = monitor.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger);
-				if let Err(e) = &update_res {
-					log_error!(self.logger, "Failed to update ChannelMonitor for channel {}: {:?}", log_funding_info!(monitor), e);
+				if update_res.is_err() {
+					log_error!(self.logger, "Failed to update ChannelMonitor for channel {}.", log_funding_info!(monitor));
 				}
 				// Even if updating the monitor returns an error, the monitor's state will
 				// still be changed. So, persist the updated monitor despite the error.
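This hunk leans on an important invariant spelled out in the context lines: `update_monitor` mutates the monitor's in-memory state even when it returns an error, so the caller must persist the monitor regardless of the result. A minimal sketch of that pattern, using hypothetical `Monitor`/`Update` types rather than LDK's actual API:

```rust
// Hypothetical stand-ins for ChannelMonitor and ChannelMonitorUpdate.
struct Monitor { updates_applied: u64 }
struct Update;

impl Monitor {
	fn update(&mut self, _update: &Update) -> Result<(), String> {
		// State changes regardless of whether the update ultimately fails.
		self.updates_applied += 1;
		Err("simulated update failure".to_string())
	}
}

// Apply an update, then persist the monitor even on failure, since the
// in-memory state has already advanced (mirroring the hunk above).
fn apply_and_persist<P: Fn(&Monitor) -> Result<(), String>>(
	monitor: &mut Monitor, update: &Update, persist: P,
) -> Result<(), String> {
	let update_res = monitor.update(update);
	persist(monitor)?;
	update_res
}
```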
@@ -727,10 +727,18 @@ impl<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> even
 
 #[cfg(test)]
 mod tests {
-	use ::{check_added_monitors, get_local_commitment_txn};
+	use bitcoin::BlockHeader;
+	use ::{check_added_monitors, check_closed_broadcast, check_closed_event};
+	use ::{expect_payment_sent, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
+	use ::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
+	use chain::{ChannelMonitorUpdateErr, Confirm, Watch};
+	use chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
+	use ln::channelmanager::PaymentSendFailure;
 	use ln::features::InitFeatures;
 	use ln::functional_test_utils::*;
-	use util::events::MessageSendEventsProvider;
+	use ln::msgs::ChannelMessageHandler;
+	use util::errors::APIError;
+	use util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
 	use util::test_utils::{OnRegisterOutput, TxOutReference};
 
 	/// Tests that in-block dependent transactions are processed by `block_connected` when not
@@ -775,4 +783,179 @@ mod tests {
 		nodes[1].node.get_and_clear_pending_msg_events();
 		nodes[1].node.get_and_clear_pending_events();
 	}
+
+	#[test]
+	fn test_async_ooo_offchain_updates() {
+		// Test that if we have multiple offchain updates being persisted and they complete
+		// out-of-order, the ChainMonitor waits until all have completed before informing the
+		// ChannelManager.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+		// Route two payments to be claimed at the same time.
+		let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+		let payment_preimage_2 = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+
+		chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
+		chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+		nodes[1].node.claim_funds(payment_preimage_1);
+		check_added_monitors!(nodes[1], 1);
+		nodes[1].node.claim_funds(payment_preimage_2);
+		check_added_monitors!(nodes[1], 1);
+
+		chanmon_cfgs[1].persister.set_update_ret(Ok(()));
+
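+		// Both TemporaryFailure persistences above are still outstanding even though
+		// the persister now returns Ok; each must be completed explicitly via
+		// channel_monitor_updated before the ChainMonitor reports anything upstream.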
+		let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
+		assert_eq!(persistences.len(), 1);
+		let (funding_txo, updates) = persistences.iter().next().unwrap();
+		assert_eq!(updates.len(), 2);
+
+		// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
+		// fail either way, but if it fails intermittently it is dependent on the ordering of updates.
+		let mut update_iter = updates.iter();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+		assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+
+		// Now manually walk the commitment_signed dance - because we claimed two payments
+		// back-to-back it doesn't fit into the neat walk that commitment_signed_dance does.
+
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+		expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
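+		// The fulfill above immediately generates node A's PaymentSent event; the
+		// matching PaymentPathSuccessful event (checked further below) only fires
+		// once the HTLC is irrevocably resolved through the commitment dance.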
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_update);
+		check_added_monitors!(nodes[1], 1);
+		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+		expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+		expect_payment_path_successful!(nodes[0]);
+		check_added_monitors!(nodes[0], 1);
+		let (as_second_raa, as_second_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+		check_added_monitors!(nodes[1], 1);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
+		expect_payment_path_successful!(nodes[0]);
+		check_added_monitors!(nodes[0], 1);
+	}
+
+	fn do_chainsync_pauses_events(block_timeout: bool) {
+		// When a chainsync monitor update occurs, any MonitorUpdates should be held before being
+		// passed upstream to a `ChannelManager` via `Watch::release_pending_monitor_events`. This
+		// tests that behavior, as well as some ways it might go wrong.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		let channel = create_announced_chan_between_nodes(
+			&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+		// Get a route for later and rebalance the channel somewhat
+		send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
+		let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
+
+		// First route a payment that we will claim on chain and give the recipient the preimage.
+		let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+		nodes[1].node.claim_funds(payment_preimage);
+		nodes[1].node.get_and_clear_pending_msg_events();
+		check_added_monitors!(nodes[1], 1);
+		let remote_txn = get_local_commitment_txn!(nodes[1], channel.2);
+		assert_eq!(remote_txn.len(), 2);
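+		// The two transactions are B's commitment transaction and the HTLC-success
+		// transaction claiming the routed payment.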
+
+		// Temp-fail the block connection which will hold the channel-closed event
+		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+		chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+		// Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
+		// channel is now closed, but the ChannelManager doesn't know that yet.
+		let new_header = BlockHeader {
+			version: 2, time: 0, bits: 0, nonce: 0,
+			prev_blockhash: nodes[0].best_block_info().0,
+			merkle_root: Default::default() };
+		nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
+			&[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
+		assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+		nodes[0].chain_monitor.chain_monitor.best_block_updated(&new_header, nodes[0].best_block_info().1 + 1);
+		assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+
+		// If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
+		// the update through to the ChannelMonitor which will refuse it (as the channel is closed).
+		chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+		unwrap_send_err!(nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)),
+			true, APIError::ChannelUnavailable { ref err },
+			assert!(err.contains("ChannelMonitor storage failure")));
+		check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
+		check_closed_broadcast!(nodes[0], true);
+		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+
+		// However, as the ChainMonitor is still waiting for the original persistence to complete,
+		// it won't yet release the MonitorEvents.
+		assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+
+		if block_timeout {
+			// After three blocks, pending MonitorEvents should be released either way.
+			let latest_header = BlockHeader {
+				version: 2, time: 0, bits: 0, nonce: 0,
+				prev_blockhash: nodes[0].best_block_info().0,
+				merkle_root: Default::default() };
+			nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
+		} else {
+			for (funding_outpoint, update_ids) in chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().iter() {
+				for update_id in update_ids {
+					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(*funding_outpoint, *update_id).unwrap();
+				}
+			}
+		}
+
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+
+	#[test]
+	fn chainsync_pauses_events() {
+		do_chainsync_pauses_events(false);
+		do_chainsync_pauses_events(true);
+	}
+
+	#[test]
+	fn update_during_chainsync_fails_channel() {
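+		// A PermanentFailure while persisting a chain-sync monitor update should
+		// force-close the channel once the ChannelManager processes events.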
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+		chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+		chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::PermanentFailure));
+
+		connect_blocks(&nodes[0], 1);
+		// Before processing events, the ChannelManager will still think the Channel is open and
+		// there won't be any ChannelMonitorUpdates
+		assert_eq!(nodes[0].node.list_channels().len(), 1);
+		check_added_monitors!(nodes[0], 0);
+		// ... however once we process events, the channel will close, creating a channel-closed
+		// ChannelMonitorUpdate.
+		check_closed_broadcast!(nodes[0], true);
+		check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
+		check_added_monitors!(nodes[0], 1);
+	}
 }
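Taken together, these tests encode the ChainMonitor's async-persistence contract: a persister may return `TemporaryFailure` to mark a write as in flight, and held-back monitor events are only released once every outstanding update has been confirmed via `channel_monitor_updated` (or, for chain-sync updates, once `LATENCY_GRACE_PERIOD_BLOCKS` have passed). A self-contained sketch of that bookkeeping, using a simplified hypothetical model rather than LDK's real types and signatures:

```rust
use std::collections::HashSet;

// Hypothetical model of the ChainMonitor's pending-persistence tracking;
// not LDK's actual types or signatures.
struct ChainMonitorModel {
	pending_update_ids: HashSet<u64>, // persistences still in flight
	held_events: Vec<String>,         // monitor events withheld from the ChannelManager
}

impl ChainMonitorModel {
	// Record an update whose persistence returned TemporaryFailure.
	fn persistence_in_flight(&mut self, update_id: u64, event: String) {
		self.pending_update_ids.insert(update_id);
		self.held_events.push(event);
	}

	// Called when an async write completes (channel_monitor_updated above).
	// Events are released only once *all* outstanding writes are done, so
	// out-of-order completions never expose a partially persisted state.
	fn channel_monitor_updated(&mut self, update_id: u64) -> Vec<String> {
		self.pending_update_ids.remove(&update_id);
		if self.pending_update_ids.is_empty() {
			std::mem::take(&mut self.held_events)
		} else {
			Vec::new()
		}
	}
}

fn main() {
	let mut cm = ChainMonitorModel { pending_update_ids: HashSet::new(), held_events: Vec::new() };
	cm.persistence_in_flight(1, "PaymentClaimed(preimage_1)".into());
	cm.persistence_in_flight(2, "PaymentClaimed(preimage_2)".into());
	assert!(cm.channel_monitor_updated(2).is_empty()); // out of order: still held
	assert_eq!(cm.channel_monitor_updated(1).len(), 2); // all done: released together
}
```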