From 653692eafccb362b3488777c90e5d43c134bc33f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 4 Jan 2026 20:07:20 +0000 Subject: [PATCH 1/7] Drop lockorder comments on `ChannelManager` While it's nice to document things, the lockorder comment at the top of `ChannelManager` is just annoying to always update and doesn't add all that much value. Developers likely shouldn't be checking it while writing code; our automated lockorder issue detection framework more than suffices to catch any bugs in test-reachable code. That makes it basically write-only, which isn't exactly a useful comment. --- lightning/src/ln/channelmanager.rs | 62 ------------------------------ 1 file changed, 62 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 10c77505408..95442ea3370 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2629,46 +2629,6 @@ where /// [`update_channel`]: chain::Watch::update_channel /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`read`]: ReadableArgs::read -// -// Lock order: -// The tree structure below illustrates the lock order requirements for the different locks of the -// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree, -// and should then be taken in the order of the lowest to the highest level in the tree. -// Note that locks on different branches shall not be taken at the same time, as doing so will -// create a new lock order for those specific locks in the order they were taken. 
-// -// Lock order tree: -// -// `pending_offers_messages` -// -// `pending_async_payments_messages` -// -// `total_consistency_lock` -// | -// |__`forward_htlcs` -// | -// |__`pending_intercepted_htlcs` -// | -// |__`decode_update_add_htlcs` -// | -// |__`per_peer_state` -// | -// |__`claimable_payments` -// | -// |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds -// | -// |__`peer_state` -// | -// |__`short_to_chan_info` -// | -// |__`outbound_scid_aliases` -// | -// |__`best_block` -// | -// |__`pending_events` -// | -// |__`pending_background_events` -// pub struct ChannelManager< M: Deref, T: Deref, @@ -2702,11 +2662,9 @@ pub struct ChannelManager< #[cfg(not(test))] flow: OffersMessageFlow, - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(any(test, feature = "_test_utils"))] pub(super) best_block: RwLock, #[cfg(not(any(test, feature = "_test_utils")))] - /// See `ChannelManager` struct-level documentation for lock order requirements. best_block: RwLock, pub(super) secp_ctx: Secp256k1, @@ -2720,8 +2678,6 @@ pub struct ChannelManager< /// after reloading from disk while replaying blocks against ChannelMonitors. /// /// See `PendingOutboundPayment` documentation for more info. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. pending_outbound_payments: OutboundPayments, /// SCID/SCID Alias -> forward infos. Key of 0 means payments received. @@ -2732,8 +2688,6 @@ pub struct ChannelManager< /// /// Note that no consistency guarantees are made about the existence of a channel with the /// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`! - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(test)] pub(super) forward_htlcs: Mutex>>, #[cfg(not(test))] @@ -2746,8 +2700,6 @@ pub struct ChannelManager< /// (or timeout) /// 2. 
HTLCs that are being held on behalf of an often-offline sender until receipt of a /// [`ReleaseHeldHtlc`] onion message from an often-offline recipient - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. pending_intercepted_htlcs: Mutex>, /// Outbound SCID Alias -> pending `update_add_htlc`s to decode. @@ -2755,22 +2707,16 @@ pub struct ChannelManager< /// /// Note that no consistency guarantees are made about the existence of a channel with the /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`! - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. decode_update_add_htlcs: Mutex>>, /// The sets of payments which are claimable or currently being claimed. See /// [`ClaimablePayments`]' individual field docs for more info. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. claimable_payments: Mutex, /// The set of outbound SCID aliases across all our channels, including unconfirmed channels /// and some closed channels which reached a usable state prior to being closed. This is used /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the /// active channel list on load. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. outbound_scid_aliases: Mutex>, /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s. @@ -2782,8 +2728,6 @@ pub struct ChannelManager< /// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency /// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a /// channel with the `channel_id` in our other maps. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(test)] pub(super) short_to_chan_info: FairRwLock>, #[cfg(not(test))] @@ -2824,8 +2768,6 @@ pub struct ChannelManager< /// channels. 
/// /// Note that the same thread must never acquire two inner `PeerState` locks at the same time. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(not(any(test, feature = "_test_utils")))] per_peer_state: FairRwLock>>>, #[cfg(any(test, feature = "_test_utils"))] @@ -2846,8 +2788,6 @@ pub struct ChannelManager< /// /// Note that events MUST NOT be removed from pending_events after deserialization, as they /// could be in the middle of being processed without the direct mutex held. - /// - /// See `ChannelManager` struct-level documentation for lock order requirements. #[cfg(not(any(test, feature = "_test_utils")))] pending_events: Mutex)>>, #[cfg(any(test, feature = "_test_utils"))] @@ -2868,8 +2808,6 @@ pub struct ChannelManager< /// /// Thus, we place them here to be handled as soon as possible once we are running normally. /// - /// See `ChannelManager` struct-level documentation for lock order requirements. - /// /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pending_background_events: Mutex>, /// Used when we have to take a BIG lock to make sure everything is self-consistent. From b5d1f7e2ef2d5c37cd2944f60ec90e509f9d4372 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 3 Jan 2026 17:37:02 +0000 Subject: [PATCH 2/7] Move HTLC holding into HTLC decode from `forward_htlcs` When we added support for async payments (which requires holding HTLCs until we receive an onion message), we added the hold logic to `ChannelManager::forward_htlcs`. This made sense as we reused the forwarding data structure in the holding logic and so already had the right types in place, but it turns out only a single call of `forward_htlcs` should ever result in an HTLC being held. All of the other calls (un-holding an HTLC, forwarding an intercepted HTLC, forwarding an HTLC decoded by LDK prior to 0.2, or processing a phantom receive) should never result in an HTLC being held. 
Instead, HTLCs should actually only ever be held when the HTLC is decoded in `process_pending_update_add_htlcs` before forwarding. Because of this, and because we want to move the interception (and thus also the holding logic) out of `forward_htlcs`, here we move the holding logic into `process_pending_update_add_htlcs`. --- lightning/src/ln/channelmanager.rs | 80 ++++++++++++++++++------------ 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 95442ea3370..04f25bc76ff 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6967,7 +6967,53 @@ where incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { - Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), + Ok(info) => { + if info.routing.should_hold_htlc() { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + update_add_htlc.htlc_id, + &incoming_channel_id, + &incoming_counterparty_node_id, + ); + let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + match held_htlcs.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + log_trace!( + self.logger, + "Intercepted held HTLC with id {}, holding until the recipient is online", + intercept_id + ); + let pending_add = PendingAddHTLCInfo { + prev_outbound_scid_alias: incoming_scid_alias, + prev_counterparty_node_id: incoming_counterparty_node_id, + prev_funding_outpoint: incoming_funding_txo, + prev_channel_id: incoming_channel_id, + prev_htlc_id: update_add_htlc.htlc_id, + prev_user_channel_id: incoming_user_channel_id, + forward_info: info, + }; + entry.insert(pending_add); + }, + hash_map::Entry::Occupied(_) => { + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + let reason = LocalHTLCFailureReason::TemporaryNodeFailure; + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + 
&incoming_counterparty_node_id, + reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = get_htlc_failure_type( + outgoing_scid_opt, + update_add_htlc.payment_hash, + ); + htlc_fails.push((htlc_fail, failure_type, reason.into())); + }, + } + } else { + htlc_forwards.push((info, update_add_htlc.htlc_id)) + } + }, Err(inbound_err) => { let failure_type = get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); @@ -6991,7 +7037,7 @@ where incoming_funding_txo, incoming_channel_id, incoming_user_channel_id, - htlc_forwards.drain(..).collect(), + htlc_forwards, ); self.forward_htlcs(&mut [pending_forwards]); for (htlc_fail, failure_type, failure_reason) in htlc_fails.drain(..) { @@ -11902,35 +11948,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ )); }; - // In the case that we have an HTLC that we're supposed to hold onto until the - // recipient comes online *and* the outbound scid is encoded as - // `fake_scid::is_valid_intercept`, we should first wait for the recipient to come - // online before generating an `HTLCIntercepted` event, since the event cannot be - // acted on until the recipient is online to cooperatively open the JIT channel. Once - // we receive the `ReleaseHeldHtlc` message from the recipient, we will circle back - // here and resume generating the event below. 
- if pending_add.forward_info.routing.should_hold_htlc() { - let intercept_id = InterceptId::from_htlc_id_and_chan_id( - prev_htlc_id, - &prev_channel_id, - &prev_counterparty_node_id, - ); - let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); - match held_htlcs.entry(intercept_id) { - hash_map::Entry::Vacant(entry) => { - log_trace!( - logger, - "Intercepted held HTLC with id {}, holding until the recipient is online", - intercept_id - ); - entry.insert(pending_add); - }, - hash_map::Entry::Occupied(_) => { - debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); - fail_intercepted_htlc(pending_add); - }, - } - } else if !is_our_scid + if !is_our_scid && pending_add.forward_info.incoming_amt_msat.is_some() && fake_scid::is_valid_intercept( &self.fake_scid_rand_bytes, From d497ec206114215b81b596ae4fdda7d3bc1e90c9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 3 Jan 2026 18:51:21 +0000 Subject: [PATCH 3/7] Fix the HTLC failure reason reported when a peer is offline If a peer is offline, but only recently went offline and thus the channel has not yet been marked disabled in our gossip, we should be returning `LocalHTLCFailureReason::PeerOffline` rather than `LocalHTLCFailureReason::ChannelNotReady`. Here we fix the error returned and tweak documentation to make the cases clearer. 
--- lightning/src/ln/channelmanager.rs | 2 ++ lightning/src/ln/onion_utils.rs | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 04f25bc76ff..33be6cbba99 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4807,6 +4807,8 @@ where if !chan.context.is_live() { if !chan.context.is_enabled() { return Err(LocalHTLCFailureReason::ChannelDisabled); + } else if !chan.context.is_connected() { + return Err(LocalHTLCFailureReason::PeerOffline); } else { return Err(LocalHTLCFailureReason::ChannelNotReady); } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index eab5e721665..7cf1062a885 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1573,7 +1573,8 @@ pub enum LocalHTLCFailureReason { /// /// The forwarding node has tampered with this value, or has a bug in its implementation. FinalIncorrectHTLCAmount, - /// The channel has been marked as disabled because the channel peer is offline. + /// The HTLC couldn't be forwarded because the channel counterparty has been offline for some + /// time. ChannelDisabled, /// The HTLC expires too far in the future, so it is rejected to avoid the worst-case outcome /// of funds being held for extended periods of time. 
From 4d5b0a6e375b778ab433b671010e72513204de26 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 5 Jan 2026 02:06:52 +0000 Subject: [PATCH 4/7] DRY HTLC failure paths in `process_pending_update_add_htlcs` --- lightning/src/ln/channelmanager.rs | 82 ++++++++++++------------------ 1 file changed, 32 insertions(+), 50 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 33be6cbba99..9248a0c374a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6887,6 +6887,22 @@ where }); let shared_secret = next_hop.shared_secret().secret_bytes(); + macro_rules! fail_htlc_continue_to_next { + ($reason: expr) => {{ + let htlc_fail = self.htlc_failure_from_update_add_err( + &update_add_htlc, + &incoming_counterparty_node_id, + $reason, + is_intro_node_blinded_forward, + &shared_secret, + ); + let failure_type = + get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); + htlc_fails.push((htlc_fail, failure_type, $reason.into())); + continue; + }}; + } + // Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature // support. // @@ -6899,18 +6915,7 @@ where if update_add_htlc.hold_htlc.is_some() && !BaseMessageHandler::provided_node_features(self).supports_htlc_hold() { - let reason = LocalHTLCFailureReason::TemporaryNodeFailure; - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(LocalHTLCFailureReason::TemporaryNodeFailure); } // Process the HTLC on the incoming channel. 
@@ -6927,17 +6932,7 @@ where ) { Some(Ok(_)) => {}, Some(Err(reason)) => { - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(reason); }, // The incoming channel no longer exists, HTLCs should be resolved onchain instead. None => continue 'outer_loop, @@ -6948,17 +6943,7 @@ where if let Err(reason) = self.can_forward_htlc(&update_add_htlc, next_packet_details) { - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = - get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, failure_type, reason.into())); - continue; + fail_htlc_continue_to_next!(reason); } } @@ -6970,6 +6955,12 @@ where next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { Ok(info) => { + let logger = WithContext::from( + &self.logger, + None, + Some(incoming_channel_id), + Some(update_add_htlc.payment_hash), + ); if info.routing.should_hold_htlc() { let intercept_id = InterceptId::from_htlc_id_and_chan_id( update_add_htlc.htlc_id, @@ -6979,10 +6970,9 @@ where let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); match held_htlcs.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { - log_trace!( - self.logger, - "Intercepted held HTLC with id {}, holding until the recipient is online", - intercept_id + log_debug!( + logger, + "Intercepted held HTLC with id {intercept_id}, holding until the recipient is online" ); let pending_add = PendingAddHTLCInfo { prev_outbound_scid_alias: incoming_scid_alias, @@ -6997,19 +6987,11 @@ where }, hash_map::Entry::Occupied(_) => { 
debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); - let reason = LocalHTLCFailureReason::TemporaryNodeFailure; - let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, - &incoming_counterparty_node_id, - reason, - is_intro_node_blinded_forward, - &shared_secret, - ); - let failure_type = get_htlc_failure_type( - outgoing_scid_opt, - update_add_htlc.payment_hash, + log_error!(logger, "Duplicate intercept id for HTLC"); + debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure ); - htlc_fails.push((htlc_fail, failure_type, reason.into())); }, } } else { From 04351fd8ab5be99e17efa46be8a7808a2f4eaf1c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 14 Jan 2026 13:28:01 +0000 Subject: [PATCH 5/7] Move HTLC interception decisions to `forward_htlcs` callsites In the next commit we'll substantially expand the types of HTLCs which can be intercepted. In order to do so, we want to make forwarding decisions with access to the (specified) destination channel. Sadly, this isn't available in `forward_htlcs`, so here we move interception decisions out of `forward_htlcs` and into `process_pending_update_add_htlcs` and `handle_release_held_htlc`. Note that we do not handle HTLC interception when forwarding an HTLC which was decoded in LDK versions prior to 0.2, which is noted in a suggested release note. This is due to a gap where such HTLC might have had its routing decision made already and be waiting for an interception decision in `forward_htlcs`, but now we will only make an interception decision when decoding the onion. 
--- lightning/src/ln/channelmanager.rs | 361 +++++++++++-------- lightning/src/ln/functional_test_utils.rs | 2 +- pending_changelog/matt-full-interception.txt | 4 + 3 files changed, 207 insertions(+), 160 deletions(-) create mode 100644 pending_changelog/matt-full-interception.txt diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9248a0c374a..8bef96835eb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -637,14 +637,6 @@ impl Readable for PaymentId { pub struct InterceptId(pub [u8; 32]); impl InterceptId { - /// This intercept id corresponds to an HTLC that will be forwarded on - /// [`ChannelManager::forward_intercepted_htlc`]. - fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self { - Self(Sha256::hash(ss).to_byte_array()) - } - - /// This intercept id corresponds to an HTLC that will be forwarded on receipt of a - /// [`ReleaseHeldHtlc`] onion message. fn from_htlc_id_and_chan_id( htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Self { @@ -4776,10 +4768,27 @@ where } } + fn forward_needs_intercept( + &self, outbound_chan: Option<&FundedChannel>, outgoing_scid: u64, + ) -> bool { + if outbound_chan.is_none() { + if fake_scid::is_valid_intercept( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + if self.config.read().unwrap().accept_intercept_htlcs { + return true; + } + } + } + false + } + #[rustfmt::skip] fn can_forward_htlc_to_outgoing_channel( &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails - ) -> Result<(), LocalHTLCFailureReason> { + ) -> Result { if !chan.context.should_announce() && !self.config.read().unwrap().accept_forwards_to_priv_channels { @@ -4788,6 +4797,7 @@ where // we don't allow forwards outbound over them. 
return Err(LocalHTLCFailureReason::PrivateChannelForward); } + let intercepted; if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector { if chan.funding.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means @@ -4795,6 +4805,7 @@ where // we don't have the channel here. return Err(LocalHTLCFailureReason::RealSCIDForward); } + intercepted = self.forward_needs_intercept(Some(chan), outgoing_scid); } else { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); } @@ -4804,7 +4815,7 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. - if !chan.context.is_live() { + if !intercepted && !chan.context.is_live() { if !chan.context.is_enabled() { return Err(LocalHTLCFailureReason::ChannelDisabled); } else if !chan.context.is_connected() { @@ -4818,7 +4829,7 @@ where } chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?; - Ok(()) + Ok(intercepted) } /// Executes a callback `C` that returns some value `X` on the channel found with the given @@ -4844,42 +4855,63 @@ where } } - #[rustfmt::skip] - fn can_forward_htlc( - &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails - ) -> Result<(), LocalHTLCFailureReason> { + fn can_forward_htlc_intercepted( + &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails, + ) -> Result { let outgoing_scid = match next_packet_details.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, HopConnector::Dummy => { // Dummy hops are only used for path padding and must not reach HTLC processing. 
debug_assert!(false, "Dummy hop reached HTLC handling."); return Err(LocalHTLCFailureReason::InvalidOnionPayload); - } + }, HopConnector::Trampoline(_) => { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); - } + }, }; - match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { - self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details) - }) { - Some(Ok(())) => {}, - Some(Err(e)) => return Err(e), - None => { - // If we couldn't find the channel info for the scid, it may be a phantom or - // intercept forward. - if (self.config.read().unwrap().accept_intercept_htlcs && - fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || - fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash) - {} else { - return Err(LocalHTLCFailureReason::UnknownNextPeer); - } - } - } + // TODO: We do the fake SCID namespace check a bunch of times here (and indirectly via + // `forward_needs_intercept`, including as called in + // `can_forward_htlc_to_outgoing_channel`), we should find a way to reduce the number of + // times we do it. 
+ let intercept = + match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { + self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details) + }) { + Some(Ok(intercept)) => intercept, + Some(Err(e)) => return Err(e), + None => { + // Perform basic sanity checks on the amounts and CLTV being forwarded + if next_packet_details.outgoing_amt_msat > msg.amount_msat { + return Err(LocalHTLCFailureReason::FeeInsufficient); + } + let cltv_delta = + msg.cltv_expiry.saturating_sub(next_packet_details.outgoing_cltv_value); + if cltv_delta < MIN_CLTV_EXPIRY_DELTA.into() { + return Err(LocalHTLCFailureReason::IncorrectCLTVExpiry); + } + + if fake_scid::is_valid_phantom( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + false + } else if self.forward_needs_intercept(None, outgoing_scid) { + true + } else { + return Err(LocalHTLCFailureReason::UnknownNextPeer); + } + }, + }; let cur_height = self.best_block.read().unwrap().height + 1; - check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)?; + check_incoming_htlc_cltv( + cur_height, + next_packet_details.outgoing_cltv_value, + msg.cltv_expiry, + )?; - Ok(()) + Ok(intercept) } #[rustfmt::skip] @@ -6939,11 +6971,13 @@ where } // Now process the HTLC on the outgoing channel if it's a forward. 
+ let mut intercept_forward = false; if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - if let Err(reason) = - self.can_forward_htlc(&update_add_htlc, next_packet_details) - { - fail_htlc_continue_to_next!(reason); + match self.can_forward_htlc_intercepted(&update_add_htlc, next_packet_details) { + Err(reason) => { + fail_htlc_continue_to_next!(reason); + }, + Ok(intercept) => intercept_forward = intercept, } } @@ -6955,6 +6989,22 @@ where next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { Ok(info) => { + let to_pending_add = |info| PendingAddHTLCInfo { + prev_outbound_scid_alias: incoming_scid_alias, + prev_counterparty_node_id: incoming_counterparty_node_id, + prev_funding_outpoint: incoming_funding_txo, + prev_channel_id: incoming_channel_id, + prev_htlc_id: update_add_htlc.htlc_id, + prev_user_channel_id: incoming_user_channel_id, + forward_info: info, + }; + let intercept_id = || { + InterceptId::from_htlc_id_and_chan_id( + update_add_htlc.htlc_id, + &incoming_channel_id, + &incoming_counterparty_node_id, + ) + }; let logger = WithContext::from( &self.logger, None, @@ -6962,32 +7012,64 @@ where Some(update_add_htlc.payment_hash), ); if info.routing.should_hold_htlc() { - let intercept_id = InterceptId::from_htlc_id_and_chan_id( - update_add_htlc.htlc_id, - &incoming_channel_id, - &incoming_counterparty_node_id, - ); let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap(); + let intercept_id = intercept_id(); match held_htlcs.entry(intercept_id) { hash_map::Entry::Vacant(entry) => { log_debug!( logger, "Intercepted held HTLC with id {intercept_id}, holding until the recipient is online" ); - let pending_add = PendingAddHTLCInfo { - prev_outbound_scid_alias: incoming_scid_alias, - prev_counterparty_node_id: incoming_counterparty_node_id, - prev_funding_outpoint: incoming_funding_txo, - prev_channel_id: incoming_channel_id, - prev_htlc_id: update_add_htlc.htlc_id, - prev_user_channel_id: incoming_user_channel_id, - 
forward_info: info, - }; + let pending_add = to_pending_add(info); entry.insert(pending_add); }, hash_map::Entry::Occupied(_) => { debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); log_error!(logger, "Duplicate intercept id for HTLC"); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure + ); + }, + } + } else if intercept_forward { + let intercept_id = intercept_id(); + let mut pending_intercepts = + self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + let pending_add = to_pending_add(info); + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &pending_add) + { + log_debug!( + logger, + "Intercepted HTLC, generating intercept event with ID {intercept_id}" + ); + let ev_entry = (intercept_ev, None); + // It's possible we processed this intercept forward, + // generated an event, then re-processed it here after + // restart, in which case the intercept event should not be + // pushed redundantly. + let mut events = self.pending_events.lock().unwrap(); + events.retain(|ev| *ev != ev_entry); + events.push_back(ev_entry); + entry.insert(pending_add); + } else { + debug_assert!(false); + log_error!( + logger, + "Failed to generate an intercept event for HTLC" + ); + fail_htlc_continue_to_next!( + LocalHTLCFailureReason::TemporaryNodeFailure + ); + } + }, + hash_map::Entry::Occupied(_) => { + log_error!( + logger, + "Failed to forward incoming HTLC: detected duplicate intercepted payment", + ); debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id"); fail_htlc_continue_to_next!( LocalHTLCFailureReason::TemporaryNodeFailure @@ -11886,26 +11968,15 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ref mut pending_forwards, ) in per_source_pending_forwards { - let mut new_intercept_events = VecDeque::new(); - let mut failed_intercept_forwards = Vec::new(); if !pending_forwards.is_empty() { for (forward_info, prev_htlc_id) in pending_forwards.drain(..) { let scid = match forward_info.routing { PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, - PendingHTLCRouting::TrampolineForward { .. } => 0, - PendingHTLCRouting::Receive { .. } => 0, - PendingHTLCRouting::ReceiveKeysend { .. } => 0, + PendingHTLCRouting::TrampolineForward { .. } + | PendingHTLCRouting::Receive { .. } + | PendingHTLCRouting::ReceiveKeysend { .. } => 0, }; - // Pull this now to avoid introducing a lock order with `forward_htlcs`. - let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid); - let payment_hash = forward_info.payment_hash; - let logger = WithContext::from( - &self.logger, - None, - Some(prev_channel_id), - Some(payment_hash), - ); let pending_add = PendingAddHTLCInfo { prev_outbound_scid_alias, prev_counterparty_node_id, @@ -11915,88 +11986,17 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ prev_user_channel_id, forward_info, }; - let mut fail_intercepted_htlc = |pending_add: PendingAddHTLCInfo| { - let htlc_source = - HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data()); - let reason = HTLCFailReason::from_failure_code( - LocalHTLCFailureReason::UnknownNextPeer, - ); - let failure_type = HTLCHandlingFailureType::InvalidForward { - requested_forward_scid: scid, - }; - failed_intercept_forwards.push(( - htlc_source, - payment_hash, - reason, - failure_type, - )); - }; - if !is_our_scid - && pending_add.forward_info.incoming_amt_msat.is_some() - && fake_scid::is_valid_intercept( - &self.fake_scid_rand_bytes, - scid, - &self.chain_hash, - ) { - let intercept_id = InterceptId::from_incoming_shared_secret( - &pending_add.forward_info.incoming_shared_secret, - ); - let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); - match pending_intercepts.entry(intercept_id) { - hash_map::Entry::Vacant(entry) => { - if let Ok(intercept_ev) = - create_htlc_intercepted_event(intercept_id, &pending_add) - { - new_intercept_events.push_back((intercept_ev, None)); - entry.insert(pending_add); - } else { - debug_assert!(false); - fail_intercepted_htlc(pending_add); - } - }, - hash_map::Entry::Occupied(_) => { - log_info!( - logger, - "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", - scid - ); - fail_intercepted_htlc(pending_add); - }, - } - } else { - match self.forward_htlcs.lock().unwrap().entry(scid) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); - }, - } + match self.forward_htlcs.lock().unwrap().entry(scid) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add)); + }, + hash_map::Entry::Vacant(entry) => { + 
entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]); + }, } } } - - for (htlc_source, payment_hash, failure_reason, destination) in - failed_intercept_forwards.drain(..) - { - self.fail_htlc_backwards_internal( - &htlc_source, - &payment_hash, - &failure_reason, - destination, - None, - ); - } - - if !new_intercept_events.is_empty() { - let mut events = self.pending_events.lock().unwrap(); - // It's possible we processed this intercept forward, generated an event, then re-processed - // it here after restart, in which case the intercept event should not be pushed - // redundantly. - new_intercept_events.retain(|ev| !events.contains(ev)); - events.append(&mut new_intercept_events); - } } } @@ -16267,6 +16267,7 @@ where prev_outbound_scid_alias, htlc_id, } => { + let _serialize_guard = PersistenceNotifierGuard::notify_on_drop(self); // It's possible the release_held_htlc message raced ahead of us transitioning the pending // update_add to `Self::pending_intercept_htlcs`. If that's the case, update the pending // update_add to indicate that the HTLC should be released immediately. @@ -16305,16 +16306,18 @@ where }, } }; - match htlc.forward_info.routing { - PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => { + let next_hop_scid = match htlc.forward_info.routing { + PendingHTLCRouting::Forward { ref mut hold_htlc, short_channel_id, .. } => { debug_assert!(hold_htlc.is_some()); *hold_htlc = None; + short_channel_id }, _ => { debug_assert!(false, "HTLC intercepts can only be forwards"); + // Let the HTLC be auto-failed before it expires. 
return; }, - } + }; let logger = WithContext::from( &self.logger, @@ -16324,16 +16327,56 @@ where ); log_trace!(logger, "Releasing held htlc with intercept_id {}", intercept_id); - let mut per_source_pending_forward = [( - htlc.prev_outbound_scid_alias, - htlc.prev_counterparty_node_id, - htlc.prev_funding_outpoint, - htlc.prev_channel_id, - htlc.prev_user_channel_id, - vec![(htlc.forward_info, htlc.prev_htlc_id)], - )]; - self.forward_htlcs(&mut per_source_pending_forward); - PersistenceNotifierGuard::notify_on_drop(self); + let should_intercept = self + .do_funded_channel_callback(next_hop_scid, |chan| { + self.forward_needs_intercept(Some(chan), next_hop_scid) + }) + .unwrap_or_else(|| self.forward_needs_intercept(None, next_hop_scid)); + + if should_intercept { + let intercept_id = InterceptId::from_htlc_id_and_chan_id( + htlc.prev_htlc_id, + &htlc.prev_channel_id, + &htlc.prev_counterparty_node_id, + ); + let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); + match pending_intercepts.entry(intercept_id) { + hash_map::Entry::Vacant(entry) => { + if let Ok(intercept_ev) = + create_htlc_intercepted_event(intercept_id, &htlc) + { + self.pending_events.lock().unwrap().push_back((intercept_ev, None)); + entry.insert(htlc); + } else { + debug_assert!(false); + // Let the HTLC be auto-failed before it expires. + return; + } + }, + hash_map::Entry::Occupied(_) => { + log_error!( + logger, + "Failed to forward incoming HTLC: detected duplicate intercepted payment", + ); + debug_assert!( + false, + "Should never have two HTLCs with the same channel id and htlc id", + ); + // Let the HTLC be auto-failed before it expires. 
+ return; + }, + } + } else { + let mut per_source_pending_forward = [( + htlc.prev_outbound_scid_alias, + htlc.prev_counterparty_node_id, + htlc.prev_funding_outpoint, + htlc.prev_channel_id, + htlc.prev_user_channel_id, + vec![(htlc.forward_info, htlc.prev_htlc_id)], + )]; + self.forward_htlcs(&mut per_source_pending_forward); + } }, _ => return, } @@ -17062,6 +17105,7 @@ where } } + let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); // Since some FundingNegotiation variants are not persisted, any splice in such state must // be failed upon reload. However, as the necessary information for the SpliceFailed event @@ -17159,7 +17203,6 @@ where } let mut pending_intercepted_htlcs = None; - let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap(); if our_pending_intercepts.len() != 0 { pending_intercepted_htlcs = Some(our_pending_intercepts); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 2cf5ea96acb..1eda3bdf9f7 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2906,7 +2906,7 @@ pub fn check_payment_claimable( _ => {}, } }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event {event:?}"), } } diff --git a/pending_changelog/matt-full-interception.txt b/pending_changelog/matt-full-interception.txt new file mode 100644 index 00000000000..2cc51a56305 --- /dev/null +++ b/pending_changelog/matt-full-interception.txt @@ -0,0 +1,4 @@ +# Backwards Compatibility + * HTLCs which were first received on an LDK version prior to LDK 0.2 will no + longer be intercepted. Instead, they will be handled as if they were not + intercepted and be forwarded/failed automatically. 
From a0723ad77f2907234fe6a6ac87100a2ac1b215ce Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 13 Jan 2026 19:57:32 +0000 Subject: [PATCH 6/7] Support generic HTLC interception At various points we've had requests to support more generic HTLC interception in LDK. In most cases, full HTLC interception was not, in fact, the right way to accomplish what the developer wanted, but there have been various times when it might have been. Here, we finally add full HTLC interception support, doing so with a configurable bitfield to allow developers to intercept only certain classes of HTLCs. Specifically, we currently support intercepting HTLCs: * which were to be forwarded to intercept SCIDs (as was already supported), * which were to be forwarded to offline private channels (for LSPs to accept HTLCs for offline clients so that they can attempt to wake them before failing the HTLC), * which were to be forwarded to online private channels (for LSPs to take additional fees or enforce certain policies), * which were to be forwarded over public channels (for general forwarding policy enforcement), * which were to be forwarded to unknown SCIDs (for everything else). 
--- .../tests/lsps2_integration_tests.rs | 9 +- lightning/src/events/mod.rs | 20 +- lightning/src/ln/async_payments_tests.rs | 4 +- lightning/src/ln/blinded_payment_tests.rs | 5 +- lightning/src/ln/channelmanager.rs | 104 ++++--- lightning/src/ln/interception_tests.rs | 290 ++++++++++++++++++ lightning/src/ln/mod.rs | 3 + lightning/src/ln/payment_tests.rs | 7 +- lightning/src/ln/reload_tests.rs | 8 +- lightning/src/util/config.rs | 127 +++++++- 10 files changed, 504 insertions(+), 73 deletions(-) create mode 100644 lightning/src/ln/interception_tests.rs diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 2e469d149b0..45c2891227d 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -38,6 +38,7 @@ use lightning::ln::peer_handler::CustomMessageHandler; use lightning::log_error; use lightning::routing::router::{RouteHint, RouteHintHop}; use lightning::sign::NodeSigner; +use lightning::util::config::HTLCInterceptionFlags; use lightning::util::errors::APIError; use lightning::util::logger::Logger; use lightning::util::test_utils::{TestBroadcaster, TestStore}; @@ -1157,7 +1158,7 @@ fn client_trusts_lsp_end_to_end_test() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; @@ -1630,7 +1631,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - 
service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; @@ -1821,7 +1822,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; @@ -2157,7 +2158,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut service_node_config = test_default_channel_config(); - service_node_config.accept_intercept_htlcs = true; + service_node_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; let mut client_node_config = test_default_channel_config(); client_node_config.manually_accept_inbound_channels = true; diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d97ae6097b6..277ce612494 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1250,28 +1250,29 @@ pub enum Event { short_channel_id: Option, }, /// Used to indicate that we've intercepted an HTLC forward. This event will only be generated if - /// you've encoded an intercept scid in the receiver's invoice route hints using - /// [`ChannelManager::get_intercept_scid`] and have set [`UserConfig::accept_intercept_htlcs`]. + /// you've set some flags on [`UserConfig::htlc_interception_flags`]. 
/// /// [`ChannelManager::forward_intercepted_htlc`] or - /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event. See - /// their docs for more information. + /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to this event in a + /// timely manner (i.e. within some number of seconds, not minutes). See their docs for more + /// information. /// /// # Failure Behavior and Persistence /// This event will eventually be replayed after failures-to-handle (i.e., the event handler /// returning `Err(ReplayEvent ())`) and will be persisted across restarts. /// - /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid - /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs + /// [`UserConfig::htlc_interception_flags`]: crate::util::config::UserConfig::htlc_interception_flags /// [`ChannelManager::forward_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::forward_intercepted_htlc /// [`ChannelManager::fail_intercepted_htlc`]: crate::ln::channelmanager::ChannelManager::fail_intercepted_htlc HTLCIntercepted { /// An id to help LDK identify which HTLC is being forwarded or failed. intercept_id: InterceptId, - /// The fake scid that was programmed as the next hop's scid, generated using - /// [`ChannelManager::get_intercept_scid`]. + /// The SCID which was selected by the sender as the next hop. It may point to one of our + /// channels, an intercept SCID generated with [`ChannelManager::get_intercept_scid`], or + /// an unknown SCID if [`HTLCInterceptionFlags::ToUnknownSCIDs`] was selected. /// /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`HTLCInterceptionFlags::ToUnknownSCIDs`]: crate::util::config::HTLCInterceptionFlags::ToUnknownSCIDs requested_next_hop_scid: u64, /// The payment hash used for this HTLC. 
payment_hash: PaymentHash, @@ -1282,7 +1283,8 @@ pub enum Event { /// Forwarding less than this amount may break compatibility with LDK versions prior to 0.0.116. /// /// Note that LDK will NOT check that expected fees were factored into this value. You MUST - /// check that whatever fee you want has been included here or subtract it as required. Further, + /// check that whatever fee you want has been included here (by comparing with + /// [`Self::HTLCIntercepted::inbound_amount_msat`]) or subtract it as required. Further, /// LDK will not stop you from forwarding more than you received. expected_outbound_amount_msat: u64, }, diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 0b2652920e1..b8d23217cef 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -60,7 +60,7 @@ use crate::sign::NodeSigner; use crate::sync::Mutex; use crate::types::features::Bolt12InvoiceFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use crate::util::ser::Writeable; use bitcoin::constants::ChainHash; use bitcoin::network::Network; @@ -3063,7 +3063,7 @@ fn intercepted_hold_htlc() { recipient_cfg.channel_handshake_limits.force_announced_channel_preference = false; let mut lsp_cfg = test_default_channel_config(); - lsp_cfg.accept_intercept_htlcs = true; + lsp_cfg.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs as u8; lsp_cfg.accept_forwards_to_priv_channels = true; lsp_cfg.enable_htlc_hold = true; diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 74981ead7f1..5a7c326ebaa 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -32,7 +32,7 @@ use crate::routing::router::{ use crate::sign::{NodeSigner, PeerStorageKey, ReceiveAuthKey, 
Recipient}; use crate::types::features::{BlindedHopFeatures, ChannelFeatures, NodeFeatures}; use crate::types::payment::{PaymentHash, PaymentSecret}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use crate::util::ser::{WithoutLength, Writeable}; use crate::util::test_utils::{self, bytes_from_hex, pubkey_from_hex, secret_from_hex}; use bitcoin::hex::DisplayHex; @@ -769,7 +769,8 @@ fn do_blinded_intercept_payment(intercept_node_fails: bool) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8bef96835eb..eeb5a536483 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -122,7 +122,9 @@ use crate::types::features::{ }; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::types::string::UntrustedString; -use crate::util::config::{ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, UserConfig}; +use crate::util::config::{ + ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, HTLCInterceptionFlags, UserConfig, +}; use crate::util::errors::APIError; use crate::util::logger::{Level, Logger, WithContext}; use crate::util::scid_utils::fake_scid; @@ -4768,27 +4770,53 @@ where } } - fn forward_needs_intercept( - &self, outbound_chan: Option<&FundedChannel>, outgoing_scid: u64, - ) -> bool { - if outbound_chan.is_none() { - if 
fake_scid::is_valid_intercept( - &self.fake_scid_rand_bytes, - outgoing_scid, - &self.chain_hash, - ) { - if self.config.read().unwrap().accept_intercept_htlcs { + fn forward_needs_intercept_to_known_chan(&self, outbound_chan: &FundedChannel) -> bool { + let intercept_flags = self.config.read().unwrap().htlc_interception_flags; + if !outbound_chan.context.should_announce() { + if outbound_chan.context.is_connected() { + if intercept_flags & (HTLCInterceptionFlags::ToOnlinePrivateChannels as u8) != 0 { + return true; + } + } else { + if intercept_flags & (HTLCInterceptionFlags::ToOfflinePrivateChannels as u8) != 0 { return true; } } + } else { + if intercept_flags & (HTLCInterceptionFlags::ToPublicChannels as u8) != 0 { + return true; + } + } + false + } + + fn forward_needs_intercept_to_unknown_chan(&self, outgoing_scid: u64) -> bool { + let intercept_flags = self.config.read().unwrap().htlc_interception_flags; + if fake_scid::is_valid_intercept( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + if intercept_flags & (HTLCInterceptionFlags::ToInterceptSCIDs as u8) != 0 { + return true; + } + } else if fake_scid::is_valid_phantom( + &self.fake_scid_rand_bytes, + outgoing_scid, + &self.chain_hash, + ) { + // Handled as a normal forward + } else if intercept_flags & (HTLCInterceptionFlags::ToUnknownSCIDs as u8) != 0 { + return true; } false } #[rustfmt::skip] fn can_forward_htlc_to_outgoing_channel( - &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails - ) -> Result { + &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, + next_packet: &NextPacketDetails, will_intercept: bool, + ) -> Result<(), LocalHTLCFailureReason> { if !chan.context.should_announce() && !self.config.read().unwrap().accept_forwards_to_priv_channels { @@ -4797,7 +4825,6 @@ where // we don't allow forwards outbound over them. 
return Err(LocalHTLCFailureReason::PrivateChannelForward); } - let intercepted; if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector { if chan.funding.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means @@ -4805,7 +4832,6 @@ where // we don't have the channel here. return Err(LocalHTLCFailureReason::RealSCIDForward); } - intercepted = self.forward_needs_intercept(Some(chan), outgoing_scid); } else { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); } @@ -4815,7 +4841,7 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. - if !intercepted && !chan.context.is_live() { + if !will_intercept && !chan.context.is_live() { if !chan.context.is_enabled() { return Err(LocalHTLCFailureReason::ChannelDisabled); } else if !chan.context.is_connected() { @@ -4827,9 +4853,7 @@ where if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { return Err(LocalHTLCFailureReason::AmountBelowMinimum); } - chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?; - - Ok(intercepted) + chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) } /// Executes a callback `C` that returns some value `X` on the channel found with the given @@ -4855,10 +4879,10 @@ where } } - fn can_forward_htlc_intercepted( - &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails, + fn can_forward_htlc_should_intercept( + &self, msg: &msgs::UpdateAddHTLC, next_hop: &NextPacketDetails, ) -> Result { - let outgoing_scid = match next_packet_details.outgoing_connector { + let outgoing_scid = match next_hop.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, 
HopConnector::Dummy => { // Dummy hops are only used for path padding and must not reach HTLC processing. @@ -4870,22 +4894,23 @@ where }, }; // TODO: We do the fake SCID namespace check a bunch of times here (and indirectly via - // `forward_needs_intercept`, including as called in + // `forward_needs_intercept_*`, including as called in // `can_forward_htlc_to_outgoing_channel`), we should find a way to reduce the number of // times we do it. let intercept = match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { - self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details) + let intercept = self.forward_needs_intercept_to_known_chan(chan); + self.can_forward_htlc_to_outgoing_channel(chan, msg, next_hop, intercept)?; + Ok(intercept) }) { Some(Ok(intercept)) => intercept, Some(Err(e)) => return Err(e), None => { // Perform basic sanity checks on the amounts and CLTV being forwarded - if next_packet_details.outgoing_amt_msat > msg.amount_msat { + if next_hop.outgoing_amt_msat > msg.amount_msat { return Err(LocalHTLCFailureReason::FeeInsufficient); } - let cltv_delta = - msg.cltv_expiry.saturating_sub(next_packet_details.outgoing_cltv_value); + let cltv_delta = msg.cltv_expiry.saturating_sub(next_hop.outgoing_cltv_value); if cltv_delta < MIN_CLTV_EXPIRY_DELTA.into() { return Err(LocalHTLCFailureReason::IncorrectCLTVExpiry); } @@ -4896,7 +4921,7 @@ where &self.chain_hash, ) { false - } else if self.forward_needs_intercept(None, outgoing_scid) { + } else if self.forward_needs_intercept_to_unknown_chan(outgoing_scid) { true } else { return Err(LocalHTLCFailureReason::UnknownNextPeer); @@ -4905,11 +4930,7 @@ where }; let cur_height = self.best_block.read().unwrap().height + 1; - check_incoming_htlc_cltv( - cur_height, - next_packet_details.outgoing_cltv_value, - msg.cltv_expiry, - )?; + check_incoming_htlc_cltv(cur_height, next_hop.outgoing_cltv_value, msg.cltv_expiry)?; Ok(intercept) } @@ -6641,11 +6662,8 @@ where /// Intercepted 
HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time /// channel to a receiving node if the node lacks sufficient inbound liquidity. /// - /// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use - /// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the - /// receiver's invoice route hints. These route hints will signal to LDK to generate an - /// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or - /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event. + /// To make use of intercepted HTLCs, [`UserConfig::htlc_interception_flags`] must have a + /// non-0 value. /// /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop /// you from forwarding more than you received. See @@ -6655,7 +6673,7 @@ where /// Errors if the event was not handled in time, in which case the HTLC was automatically failed /// backwards. /// - /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs + /// [`UserConfig::htlc_interception_flags`]: crate::util::config::UserConfig::htlc_interception_flags /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted /// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat // TODO: when we move to deciding the best outbound channel at forward time, only take
let mut intercept_forward = false; if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - match self.can_forward_htlc_intercepted(&update_add_htlc, next_packet_details) { + match self + .can_forward_htlc_should_intercept(&update_add_htlc, next_packet_details) + { Err(reason) => { fail_htlc_continue_to_next!(reason); }, @@ -16329,9 +16349,9 @@ where let should_intercept = self .do_funded_channel_callback(next_hop_scid, |chan| { - self.forward_needs_intercept(Some(chan), next_hop_scid) + self.forward_needs_intercept_to_known_chan(chan) }) - .unwrap_or_else(|| self.forward_needs_intercept(None, next_hop_scid)); + .unwrap_or_else(|| self.forward_needs_intercept_to_unknown_chan(next_hop_scid)); if should_intercept { let intercept_id = InterceptId::from_htlc_id_and_chan_id( diff --git a/lightning/src/ln/interception_tests.rs b/lightning/src/ln/interception_tests.rs new file mode 100644 index 00000000000..11b5de166f6 --- /dev/null +++ b/lightning/src/ln/interception_tests.rs @@ -0,0 +1,290 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests that test standing up a network of ChannelManagers, creating channels, sending +//! payments/messages between them, and often checking the resulting ChannelMonitors are able to +//! claim outputs on-chain. 
+ +use crate::events::{Event, HTLCHandlingFailureReason, HTLCHandlingFailureType}; +use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler}; +use crate::ln::onion_utils::LocalHTLCFailureReason; +use crate::routing::router::PaymentParameters; +use crate::util::config::HTLCInterceptionFlags; + +use crate::prelude::*; + +use crate::ln::functional_test_utils::*; + +#[derive(Clone, Copy, PartialEq, Eq)] +enum ForwardingMod { + FeeTooLow, + CLTVBelowConfig, + CLTVBelowMin, +} + +fn do_test_htlc_interception_flags( + flags_bitmask: u8, flag: HTLCInterceptionFlags, modification: Option, +) { + use HTLCInterceptionFlags as Flag; + + assert_eq!((flag as isize).count_ones(), 1, "We can only test one type of HTLC at once"); + + // Tests that the `htlc_interception_flags` bitmask given by `flags_bitmask` correctly + // intercepts (or doesn't intercept) an HTLC which is of type `flag` + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + + let mut intercept_config = test_default_channel_config(); + intercept_config.htlc_interception_flags = flags_bitmask; + intercept_config.channel_config.forwarding_fee_base_msat = 1000; + intercept_config.channel_config.cltv_expiry_delta = 6 * 24; + intercept_config.accept_forwards_to_priv_channels = true; + + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_config), None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + let node_0_id = nodes[0].node.get_our_node_id(); + let node_1_id = nodes[1].node.get_our_node_id(); + let node_2_id = nodes[2].node.get_our_node_id(); + + // First open the right type of channel (and get it in the right state) for the bit we're + // testing. 
+ let (target_scid, target_chan_id) = match flag { + Flag::ToOfflinePrivateChannels | Flag::ToOnlinePrivateChannels => { + create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 0); + let chan_id = nodes[2].node.list_channels()[0].channel_id; + let scid = nodes[2].node.list_channels()[0].short_channel_id.unwrap(); + if flag == Flag::ToOfflinePrivateChannels { + nodes[1].node.peer_disconnected(node_2_id); + nodes[2].node.peer_disconnected(node_1_id); + } else { + assert_eq!(flag, Flag::ToOnlinePrivateChannels); + } + (scid, chan_id) + }, + Flag::ToInterceptSCIDs | Flag::ToPublicChannels | Flag::ToUnknownSCIDs => { + let (chan_upd, _, chan_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); + if flag == Flag::ToInterceptSCIDs { + (nodes[1].node.get_intercept_scid(), chan_id) + } else if flag == Flag::ToPublicChannels { + (chan_upd.contents.short_channel_id, chan_id) + } else if flag == Flag::ToUnknownSCIDs { + (42424242, chan_id) + } else { + panic!(); + } + }, + _ => panic!("Combined flags aren't allowed"), + }; + + // Start every node on the same block height to ensure we don't hit spurious CLTV issues + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + // Send the HTLC from nodes[0] to nodes[1] and process it to generate the interception (if + // we're set to intercept it). 
+ let amt_msat = 100_000; + let bolt11 = nodes[2].node.create_bolt11_invoice(Default::default()).unwrap(); + let pay_params = PaymentParameters::from_bolt11_invoice(&bolt11); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], pay_params, amt_msat); + route.paths[0].hops[1].short_channel_id = target_scid; + + let interception_bit_match = (flags_bitmask & (flag as u8)) != 0; + match modification { + Some(ForwardingMod::FeeTooLow) => { + assert!( + interception_bit_match, + "No reason to test failing if we aren't trying to intercept", + ); + route.paths[0].hops[0].fee_msat = 500; + }, + Some(ForwardingMod::CLTVBelowConfig) => { + route.paths[0].hops[0].cltv_expiry_delta = 6 * 12; + assert!( + interception_bit_match, + "No reason to test failing if we aren't trying to intercept", + ); + }, + Some(ForwardingMod::CLTVBelowMin) => { + route.paths[0].hops[0].cltv_expiry_delta = 6; + }, + None => {}, + } + + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let payment_event = SendEvent::from_node(&nodes[0]); + nodes[1].node.handle_update_add_htlc(node_0_id, &payment_event.msgs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, true); + expect_and_process_pending_htlcs(&nodes[1], false); + + if interception_bit_match && modification.is_none() { + // If we were set to intercept, check that we got an interception event then + // forward the HTLC on to nodes[2] and claim the payment. + let intercept_id; + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1, "{events:?}"); + if let Event::HTLCIntercepted { intercept_id: id, requested_next_hop_scid, .. 
} = &events[0] + { + assert_eq!(*requested_next_hop_scid, target_scid, + "Bitmask {flags_bitmask:#x}: Expected interception for bit {flag:?} to target SCID {target_scid}"); + intercept_id = *id; + } else { + panic!("{events:?}"); + } + + if flag == Flag::ToOfflinePrivateChannels { + let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); + reconnect_args.send_channel_ready = (true, true); + reconnect_nodes(reconnect_args); + } + + nodes[1] + .node + .forward_intercepted_htlc(intercept_id, &target_chan_id, node_2_id, amt_msat) + .unwrap(); + expect_and_process_pending_htlcs(&nodes[1], false); + check_added_monitors(&nodes[1], 1); + + let forward_ev = SendEvent::from_node(&nodes[1]); + nodes[2].node.handle_update_add_htlc(node_1_id, &forward_ev.msgs[0]); + do_commitment_signed_dance(&nodes[2], &nodes[1], &forward_ev.commitment_msg, false, true); + + nodes[2].node.process_pending_htlc_forwards(); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + } else { + // If we were not set to intercept, check that the HTLC either failed or was + // automatically forwarded as appropriate. 
+ match (modification, flag) { + (None, Flag::ToOnlinePrivateChannels | Flag::ToPublicChannels) => { + check_added_monitors(&nodes[1], 1); + + let forward_ev = SendEvent::from_node(&nodes[1]); + assert_eq!(forward_ev.node_id, node_2_id); + nodes[2].node.handle_update_add_htlc(node_1_id, &forward_ev.msgs[0]); + let commitment = &forward_ev.commitment_msg; + do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, true); + + nodes[2].node.process_pending_htlc_forwards(); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); + }, + _ => { + let events = nodes[1].node.get_and_clear_pending_events(); + let reason_from_mod = match modification { + Some(ForwardingMod::FeeTooLow) => Some(LocalHTLCFailureReason::FeeInsufficient), + Some(ForwardingMod::CLTVBelowConfig) => { + Some(LocalHTLCFailureReason::IncorrectCLTVExpiry) + }, + Some(ForwardingMod::CLTVBelowMin) => { + Some(LocalHTLCFailureReason::IncorrectCLTVExpiry) + }, + None => None, + }; + let (expected_failure_type, reason); + if flag == Flag::ToOfflinePrivateChannels { + expected_failure_type = HTLCHandlingFailureType::Forward { + node_id: Some(node_2_id), + channel_id: target_chan_id, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::PeerOffline); + } else if flag == Flag::ToInterceptSCIDs { + expected_failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: target_scid, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::UnknownNextPeer); + } else if flag == Flag::ToUnknownSCIDs { + expected_failure_type = HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: target_scid, + }; + reason = reason_from_mod.unwrap_or(LocalHTLCFailureReason::UnknownNextPeer); + } else { + expected_failure_type = HTLCHandlingFailureType::Forward { + node_id: Some(node_2_id), + channel_id: target_chan_id, + }; + reason = reason_from_mod + .expect("We should only 
fail because of a mod or unknown next-hop"); + } + if let Event::HTLCHandlingFailed { failure_reason, failure_type, .. } = &events[0] { + assert_eq!(*failure_reason, Some(HTLCHandlingFailureReason::Local { reason })); + assert_eq!(*failure_type, expected_failure_type); + } else { + panic!("{events:?}"); + } + + check_added_monitors(&nodes[1], 1); + let fail_msgs = get_htlc_update_msgs(&nodes[1], &node_0_id); + nodes[0].node.handle_update_fail_htlc(node_1_id, &fail_msgs.update_fail_htlcs[0]); + let commitment = fail_msgs.commitment_signed; + do_commitment_signed_dance(&nodes[0], &nodes[1], &commitment, true, true); + expect_payment_failed!(nodes[0], payment_hash, false); + }, + } + } +} + +const MAX_BITMASK: u8 = HTLCInterceptionFlags::AllValidHTLCs as u8; +const ALL_FLAGS: [HTLCInterceptionFlags; 5] = [ + HTLCInterceptionFlags::ToInterceptSCIDs, + HTLCInterceptionFlags::ToOfflinePrivateChannels, + HTLCInterceptionFlags::ToOnlinePrivateChannels, + HTLCInterceptionFlags::ToPublicChannels, + HTLCInterceptionFlags::ToUnknownSCIDs, +]; + +#[test] +fn test_htlc_interception_flags() { + let mut all_flag_bits = 0; + for flag in ALL_FLAGS { + all_flag_bits |= flag as isize; + } + assert_eq!(all_flag_bits, MAX_BITMASK as isize, "all flags must test all bits"); + + // Test all 2^5 = 32 combinations of the HTLCInterceptionFlags bitmask + // For each combination, test 5 different HTLC forwards and verify correct interception behavior + for flags_bitmask in 0..=MAX_BITMASK { + for flag in ALL_FLAGS { + do_test_htlc_interception_flags(flags_bitmask, flag, None); + } + } +} + +#[test] +fn test_htlc_bad_for_chan_config() { + // Test that interception won't be done if an HTLC fails to meet the target channel's channel + // config. 
+ let have_chan_flags = [ + HTLCInterceptionFlags::ToOfflinePrivateChannels, + HTLCInterceptionFlags::ToOnlinePrivateChannels, + HTLCInterceptionFlags::ToPublicChannels, + ]; + for flag in have_chan_flags { + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::FeeTooLow)); + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::CLTVBelowConfig)); + } +} + +#[test] +fn test_htlc_bad_no_chan() { + // Test that setting the CLTV below the hard-coded minimum fails whether we're intercepting for + // a channel or not. + for flag in ALL_FLAGS { + do_test_htlc_interception_flags(flag as u8, flag, Some(ForwardingMod::CLTVBelowMin)); + } +} diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index e782fee92f6..b077c98ae73 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -84,6 +84,9 @@ pub mod functional_tests; #[cfg(any(test, feature = "_externalize_tests"))] #[allow(unused_mut)] pub mod htlc_reserve_unit_tests; +#[cfg(any(test, feature = "_externalize_tests"))] +#[allow(unused_mut)] +pub mod interception_tests; #[cfg(test)] #[allow(unused_mut)] mod max_payment_path_len_tests; diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 8f209c88e25..8ac87fbdb10 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -45,6 +45,7 @@ use crate::sign::EntropySource; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::types::string::UntrustedString; +use crate::util::config::HTLCInterceptionFlags; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_utils; @@ -2210,7 +2211,8 @@ fn do_test_intercepted_payment(test: InterceptTest) { let mut zero_conf_chan_config = test_default_channel_config(); zero_conf_chan_config.manually_accept_inbound_channels = true; let mut intercept_forwards_config = 
test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let configs = [None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); @@ -2435,7 +2437,8 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let max_in_flight_percent = 10; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; intercept_forwards_config .channel_handshake_config .max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index a38262e6952..4fb2753b6be 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -26,7 +26,7 @@ use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils; use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; -use crate::util::config::UserConfig; +use crate::util::config::{HTLCInterceptionFlags, UserConfig}; use bitcoin::hashes::Hash; use bitcoin::hash_types::BlockHash; @@ -931,7 +931,8 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht let new_chain_monitor; let mut intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes_1_deserialized; @@ -1189,7 +1190,8 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { let persister; let new_chain_monitor; let mut 
intercept_forwards_config = test_default_channel_config(); - intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.htlc_interception_flags = + HTLCInterceptionFlags::ToInterceptSCIDs as u8; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), None]); let nodes_1_deserialized; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs index dd1aaa40424..feb326cfad6 100644 --- a/lightning/src/util/config.rs +++ b/lightning/src/util/config.rs @@ -855,6 +855,111 @@ impl crate::util::ser::Readable for LegacyChannelConfig { } } +/// Flags which can be set on [`UserConfig::htlc_interception_flags`]. Each flag selects some set +/// of HTLCs which are forwarded across this node to be intercepted instead, generating an +/// [`Event::HTLCIntercepted`] instead of automatically forwarding the HTLC and allowing it to be +/// forwarded or rejected manually. +/// +/// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum HTLCInterceptionFlags { + /// If this flag is set, LDK will intercept HTLCs that are attempting to be forwarded over fake + /// short channel ids generated via [`ChannelManager::get_intercept_scid`]. This allows you to + /// only intercept HTLCs which are specifically marked for interception by the invoice being + /// paid. + /// + /// Note that because LDK is not aware of which channel the HTLC will be forwarded over at the + /// time of interception, only basic checks to ensure the fee the HTLC intends to pay is not + /// negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are performed + /// before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and CLTV + /// delta meets your requirements before forwarding the HTLC. 
+ /// + /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToInterceptSCIDs = 1 << 0, + /// If this flag is set, any attempts to forward a payment to a private channel while the + /// channel counterparty is offline will instead generate an [`Event::HTLCIntercepted`] which + /// must be handled the same as any other intercepted HTLC. + /// + /// This is useful for LSPs that may need to wake the recipient node (e.g. via a mobile push + /// notification). Note that in this case you must ensure that you set a quick timeout to fail + /// the HTLC if the recipient node fails to come online (e.g. within 10 seconds). + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToOfflinePrivateChannels = 1 << 1, + /// If this flag is set, any attempts to forward a payment to a private channel while the + /// channel counterparty is online will instead generate an [`Event::HTLCIntercepted`] which + /// must be handled the same as any other intercepted HTLC. + /// + /// This is the complement to [`Self::ToOfflinePrivateChannels`] and, together, they allow + /// intercepting all HTLCs destined for private channels. This may be useful for LSPs that wish + /// to take an additional fee paid by the recipient on all forwards to clients. + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. 
+ /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToOnlinePrivateChannels = 1 << 2, + /// If this flag is set, any attempts to forward a payment to a publicly announced channel will + /// instead generate an [`Event::HTLCIntercepted`] which must be handled the same as any other + /// intercepted HTLC. + /// + /// Before interception, the HTLC is validated against the forwarding config of the outbound + /// channel to ensure it pays sufficient fee and meets the + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToPublicChannels = 1 << 3, + /// If these flags are set, any attempts to forward a payment to a channel of ours or a fake + /// short channel id generated via [`ChannelManager::get_intercept_scid`] will instead generate + /// an [`Event::HTLCIntercepted`] which must be handled the same as any other intercepted HTLC. + /// + /// In the case of intercept SCIDs, only basic checks to ensure the fee the HTLC intends to pay + /// is not negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are + /// performed before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and + /// CLTV delta meets your requirements before forwarding the HTLC. + /// + /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToAllKnownSCIDs = Self::ToInterceptSCIDs as isize + | Self::ToOfflinePrivateChannels as isize + | Self::ToOnlinePrivateChannels as isize + | Self::ToPublicChannels as isize, + /// If this flag is set, any attempts to forward a payment to an unknown short channel id will + /// instead generate an [`Event::HTLCIntercepted`] which must be handled the same as any other + /// intercepted HTLC. 
+ /// + /// Note that because LDK is not aware of which channel the HTLC will be forwarded over at the + /// time of interception, only basic checks to ensure the fee the HTLC intends to pay is not + /// negative and a minimum CLTV delta between the incoming and outgoing HTLC edge are performed + /// before the [`Event::HTLCIntercepted`] is generated. You must validate the fee and CLTV + /// delta meets your requirements before forwarding the HTLC. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + ToUnknownSCIDs = 1 << 4, + /// If these flags are set, all HTLCs being forwarded over this node will instead generate an + /// [`Event::HTLCIntercepted`] which must be handled the same as any other intercepted HTLC. + /// + /// In the case of intercept or unknown SCIDs, only basic checks to ensure the fee the HTLC + /// intends to pay is not negative and a minimum CLTV delta between the incoming and outgoing + /// HTLC edge are performed before the [`Event::HTLCIntercepted`] is generated. You must + /// validate the fee and CLTV delta meets your requirements before forwarding the HTLC. + /// + /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted + AllValidHTLCs = Self::ToAllKnownSCIDs as isize | Self::ToUnknownSCIDs as isize, +} + +impl Into<u8> for HTLCInterceptionFlags { + fn into(self) -> u8 { + self as u8 + } +} + /// Top-level config which holds ChannelHandshakeLimits and ChannelConfig. /// /// `Default::default()` provides sane defaults for most configurations @@ -907,17 +1012,21 @@ pub struct UserConfig { /// [`msgs::OpenChannel`]: crate::ln::msgs::OpenChannel /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel pub manually_accept_inbound_channels: bool, - /// If this is set to `true`, LDK will intercept HTLCs that are attempting to be forwarded over - /// fake short channel ids generated via [`ChannelManager::get_intercept_scid`]. 
Upon HTLC - /// intercept, LDK will generate an [`Event::HTLCIntercepted`] which MUST be handled by the user. + /// Flags consisting of OR'd values from [`HTLCInterceptionFlags`] which describe HTLCs + /// forwarded over this node to intercept. Any HTLCs which are intercepted will generate an + /// [`Event::HTLCIntercepted`] event which must be handled to forward or fail the HTLC. /// - /// Setting this to `true` may break backwards compatibility with LDK versions < 0.0.113. + /// Do NOT hold on to intercepted HTLCs for more than a few seconds, they must always be + /// forwarded or failed nearly immediately to avoid performing accidental denial of service + /// attacks against other lightning nodes and being punished appropriately by other nodes. /// - /// Default value: `false` + /// To ensure efficiency and reliable HTLC latency you should ensure you only intercept types + /// of HTLCs which you need to manually forward or reject. + /// + /// Default value: `0` (indicating no HTLCs will be intercepted). /// - /// [`ChannelManager::get_intercept_scid`]: crate::ln::channelmanager::ChannelManager::get_intercept_scid /// [`Event::HTLCIntercepted`]: crate::events::Event::HTLCIntercepted - pub accept_intercept_htlcs: bool, + pub htlc_interception_flags: u8, /// If this is set to `true`, the user needs to manually pay [`Bolt12Invoice`]s when received. 
/// /// When set to `true`, [`Event::InvoiceReceived`] will be generated for each received @@ -984,7 +1093,7 @@ impl Default for UserConfig { accept_forwards_to_priv_channels: false, accept_inbound_channels: true, manually_accept_inbound_channels: false, - accept_intercept_htlcs: false, + htlc_interception_flags: 0, manually_handle_bolt12_invoices: false, enable_dual_funded_channels: false, enable_htlc_hold: false, @@ -1007,7 +1116,7 @@ impl Readable for UserConfig { accept_forwards_to_priv_channels: Readable::read(reader)?, accept_inbound_channels: Readable::read(reader)?, manually_accept_inbound_channels: Readable::read(reader)?, - accept_intercept_htlcs: Readable::read(reader)?, + htlc_interception_flags: Readable::read(reader)?, manually_handle_bolt12_invoices: Readable::read(reader)?, enable_dual_funded_channels: Readable::read(reader)?, hold_outbound_htlcs_at_next_hop: Readable::read(reader)?, From ce91315bb057d6707ea89c55d44b55e0e0ce30a6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 23 Jan 2026 02:15:04 +0000 Subject: [PATCH 7/7] Expose the outgoing HTLC's CLTV expiry in `Event::HTLCIntercepted` In the previous commit our interception documentation noted that in some cases devs may wish to validate the CLTV expiry of HTLCs before forwarding. Here we enable that by exposing the next-hop's CLTV value in `Event::HTLCIntercepted` --- lightning/src/events/mod.rs | 12 ++++++++++++ lightning/src/ln/channelmanager.rs | 1 + lightning/src/ln/payment_tests.rs | 4 ++++ 3 files changed, 17 insertions(+) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index 277ce612494..b029caa30d7 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -1287,6 +1287,13 @@ pub enum Event { /// [`Self::HTLCIntercepted::inbound_amount_msat`]) or subtract it as required. Further, /// LDK will not stop you from forwarding more than you received. 
expected_outbound_amount_msat: u64, + /// The block height at which the forwarded HTLC sent to our peer will time out. In + /// practice, LDK will refuse to forward an HTLC several blocks before this height (as if + /// we attempted to forward an HTLC at this height we'd run some risk that our peer + /// force-closes the channel immediately). + /// + /// This will only be `None` for events generated or serialized by LDK 0.2 or prior. + outgoing_htlc_expiry_block_height: Option<u32>, }, /// Used to indicate that an output which you should know how to spend was confirmed on chain /// and is now spendable. @@ -2017,11 +2024,13 @@ impl Writeable for Event { inbound_amount_msat, expected_outbound_amount_msat, intercept_id, + outgoing_htlc_expiry_block_height, } => { 6u8.write(writer)?; let intercept_scid = InterceptNextHop::FakeScid { requested_next_hop_scid }; write_tlv_fields!(writer, { (0, intercept_id, required), + (1, outgoing_htlc_expiry_block_height, option), (2, intercept_scid, required), (4, payment_hash, required), (6, inbound_amount_msat, required), @@ -2526,8 +2535,10 @@ impl MaybeReadable for Event { InterceptNextHop::FakeScid { requested_next_hop_scid: 0 }; let mut inbound_amount_msat = 0; let mut expected_outbound_amount_msat = 0; + let mut outgoing_htlc_expiry_block_height = None; read_tlv_fields!(reader, { (0, intercept_id, required), + (1, outgoing_htlc_expiry_block_height, option), (2, requested_next_hop_scid, required), (4, payment_hash, required), (6, inbound_amount_msat, required), @@ -2542,6 +2553,7 @@ impl MaybeReadable for Event { inbound_amount_msat, expected_outbound_amount_msat, intercept_id, + outgoing_htlc_expiry_block_height, })) }, 7u8 => { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index eeb5a536483..fd5e5d15b9f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3409,6 +3409,7 @@ fn create_htlc_intercepted_event( inbound_amount_msat, 
expected_outbound_amount_msat: pending_add.forward_info.outgoing_amt_msat, intercept_id, + outgoing_htlc_expiry_block_height: Some(pending_add.forward_info.outgoing_cltv_value), }) } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 8ac87fbdb10..14446239a31 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -2277,6 +2277,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { // Check that we generate the PaymentIntercepted event when an intercept forward is detected. let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); + let expected_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; let (intercept_id, outbound_amt) = match events[0] { crate::events::Event::HTLCIntercepted { intercept_id, @@ -2284,10 +2285,12 @@ fn do_test_intercepted_payment(test: InterceptTest) { payment_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id, + outgoing_htlc_expiry_block_height, } => { assert_eq!(payment_hash, hash); assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees()); assert_eq!(short_channel_id, intercept_scid); + assert_eq!(outgoing_htlc_expiry_block_height.unwrap(), expected_cltv); (intercept_id, expected_outbound_amount_msat) }, _ => panic!(), @@ -2356,6 +2359,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; + assert_eq!(payment_event.msgs[0].cltv_expiry, expected_cltv); nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); let commitment = &payment_event.commitment_msg; do_commitment_signed_dance(&nodes[2], &nodes[1], commitment, false, true);